Pārlūkot izejas kodu

Add dynamic inventory

This adds an `inventory.py` script to the `sample-inventory` that lists
all the necessary servers and groups dynamically, skipping the
`static_inventory` role as well as the `hosts` creation.

It also adds an `os_cinder` lookup function which is necessary for a
seamless Cinder OpenShift registry integration without a static
inventory.
Tomas Sedovic 7 gadi atpakaļ
vecāks
revīzija
51e0176478

+ 13 - 0
playbooks/provisioning/openstack/README.md

@@ -361,6 +361,19 @@ registry. Again in `OSEv3.yml`:
 The filesystem value here will be used in the initial formatting of
 the volume.
 
+If you're using the dynamic inventory, you must uncomment these two values as
+well:
+
+    #openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}"
+    #openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi"
+
+But note that they use the `os_cinder` lookup plugin we provide, so you must
+tell Ansible where to find it either in `ansible.cfg` (the one we provide is
+configured properly) or by exporting the
+`ANSIBLE_LOOKUP_PLUGINS=openshift-ansible-contrib/lookup_plugins` environment
+variable.
+
+
 
 ### Use an existing Cinder volume for the OpenShift registry
 

+ 3 - 0
playbooks/provisioning/openstack/sample-inventory/ansible.cfg

@@ -1,6 +1,7 @@
 # config file for ansible -- http://ansible.com/
 # ==============================================
 [defaults]
+remote_user = openshift
 forks = 50
 # work around privilege escalation timeouts in ansible
 timeout = 30
@@ -14,6 +15,8 @@ fact_caching_connection = .ansible/cached_facts
 fact_caching_timeout = 900
 stdout_callback = skippy
 callback_whitelist = profile_tasks
+lookup_plugins = openshift-ansible-contrib/lookup_plugins
+
 
 [ssh_connection]
 ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no

+ 7 - 2
playbooks/provisioning/openstack/sample-inventory/group_vars/OSEv3.yml

@@ -27,9 +27,14 @@ openshift_hosted_registry_wait: True
 #openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
 #openshift_hosted_registry_storage_openstack_filesystem: xfs
 
-## Configure this if you're attaching a Cinder volume you've set up.
+## NOTE(shadower): This won't work until the openshift-ansible issue #5657 is fixed:
+## https://github.com/openshift/openshift-ansible/issues/5657
 ## If you're using the `cinder_hosted_registry_name` option from
-## `all.yml`, this will be configured automaticaly.
+## `all.yml`, uncomment these lines:
+#openshift_hosted_registry_storage_openstack_volumeID: "{{ lookup('os_cinder', cinder_hosted_registry_name).id }}"
+#openshift_hosted_registry_storage_volume_size: "{{ cinder_hosted_registry_size_gb }}Gi"
+
+## If you're using a Cinder volume you've set up yourself, uncomment these lines:
 #openshift_hosted_registry_storage_openstack_volumeID: e0ba2d73-d2f9-4514-a3b2-a0ced507fa05
 #openshift_hosted_registry_storage_volume_size: 10Gi
 

+ 89 - 0
playbooks/provisioning/openstack/sample-inventory/inventory.py

@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import json
+import os
+import sys
+
+import shade
+
+
+if __name__ == '__main__':
+    cloud = shade.openstack_cloud()
+
+    inventory = {}
+
+    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+    # environment variable.
+    cluster_hosts = [
+        server for server in cloud.list_servers()
+        if 'metadata' in server and 'clusterid' in server.metadata]
+    
+    masters = [server.name for server in cluster_hosts
+               if server.metadata['host-type'] == 'master']
+
+    etcd = [server.name for server in cluster_hosts
+            if server.metadata['host-type'] == 'etcd']
+    if not etcd:
+        etcd = masters
+
+    infra_hosts = [server.name for server in cluster_hosts
+                   if server.metadata['host-type'] == 'node' and
+                        server.metadata['sub-host-type'] == 'infra']
+
+    app = [server.name for server in cluster_hosts
+           if server.metadata['host-type'] == 'node' and
+               server.metadata['sub-host-type'] == 'app']
+
+    nodes = list(set(masters + infra_hosts + app))
+
+    dns = [server.name for server in cluster_hosts
+           if server.metadata['host-type'] == 'dns']
+
+    lb = [server.name for server in cluster_hosts
+           if server.metadata['host-type'] == 'lb']
+
+    osev3 = list(set(nodes + etcd + lb))
+
+    groups = [server.metadata.group for server in cluster_hosts
+              if 'group' in server.metadata]
+
+    inventory['cluster_hosts'] = { 'hosts': [s.name for s in cluster_hosts] }
+    inventory['OSEv3'] = { 'hosts': osev3 }
+    inventory['masters'] = { 'hosts': masters }
+    inventory['etcd'] = { 'hosts': etcd }
+    inventory['nodes'] = { 'hosts': nodes }
+    inventory['infra_hosts'] = { 'hosts': infra_hosts }
+    inventory['app'] = { 'hosts': app }
+    inventory['dns'] = { 'hosts': dns }
+    inventory['lb'] = { 'hosts': lb }
+
+    for server in cluster_hosts:
+        if 'group' in server.metadata:
+            group = server.metadata.group
+            if group not in inventory:
+                inventory[group] = {'hosts': []}
+            inventory[group]['hosts'].append(server.name)
+
+    inventory['_meta'] = { 'hostvars': {} }
+
+    for server in cluster_hosts:
+        ssh_ip_address = server.public_v4 or server.private_v4
+        vars = {
+            'ansible_host': ssh_ip_address
+        }
+
+        if server.public_v4:
+            vars['public_v4'] = server.public_v4
+        # TODO(shadower): what about multiple networks?
+        if server.private_v4:
+            vars['private_v4'] = server.private_v4
+
+        node_labels = server.metadata.get('node_labels')
+        if node_labels:
+            vars['openshift_node_labels'] = node_labels
+
+        inventory['_meta']['hostvars'][server.name] = vars
+
+    print(json.dumps(inventory, indent=4, sort_keys=True))