inventory.py

#!/usr/bin/env python
"""
This is an Ansible dynamic inventory for OpenStack.
It requires your OpenStack credentials to be set in clouds.yaml or your shell
environment.
"""

from __future__ import print_function

import argparse
import json
import os

try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

from keystoneauth1.exceptions.catalog import EndpointNotFound
import shade


def base_openshift_inventory(cluster_hosts):
    '''Set the base openshift inventory.'''
    inventory = {}

    masters = [server.name for server in cluster_hosts
               if server.metadata['host-type'] == 'master']

    etcd = [server.name for server in cluster_hosts
            if server.metadata['host-type'] == 'etcd']
    if not etcd:
        etcd = masters

    infra_hosts = [server.name for server in cluster_hosts
                   if server.metadata['host-type'] == 'node' and
                   server.metadata['sub-host-type'] == 'infra']

    app = [server.name for server in cluster_hosts
           if server.metadata['host-type'] == 'node' and
           server.metadata['sub-host-type'] == 'app']

    cns = [server.name for server in cluster_hosts
           if server.metadata['host-type'] == 'cns']

    load_balancers = [server.name for server in cluster_hosts
                      if server.metadata['host-type'] == 'lb']

    # NOTE: everything that should go to the `[nodes]` group:
    nodes = list(set(masters + etcd + infra_hosts + app + cns))

    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
    osev3 = list(set(nodes + load_balancers))

    inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
    inventory['openstack_nodes'] = {'hosts': nodes}
    inventory['openstack_master_nodes'] = {'hosts': masters}
    inventory['openstack_etcd_nodes'] = {'hosts': etcd}
    inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
    inventory['openstack_compute_nodes'] = {'hosts': app}
    inventory['openstack_cns_nodes'] = {'hosts': cns}
    inventory['lb'] = {'hosts': load_balancers}
    inventory['localhost'] = {'ansible_connection': 'local'}

    return inventory


def get_docker_storage_mountpoints(volumes):
    '''Check volumes to see if they're being used for docker storage'''
    docker_storage_mountpoints = {}
    for volume in volumes:
        if volume.metadata.get('purpose') == "openshift_docker_storage":
            for attachment in volume.attachments:
                if attachment.server_id in docker_storage_mountpoints:
                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
                else:
                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
    return docker_storage_mountpoints


def _get_hostvars(server, docker_storage_mountpoints):
    ssh_ip_address = server.public_v4 or server.private_v4
    hostvars = {
        'ansible_host': ssh_ip_address
    }

    public_v4 = server.public_v4 or server.private_v4
    if public_v4:
        hostvars['public_v4'] = server.public_v4
        hostvars['openshift_public_ip'] = server.public_v4
    # TODO(shadower): what about multiple networks?
    if server.private_v4:
        hostvars['private_v4'] = server.private_v4
        hostvars['openshift_ip'] = server.private_v4

        # NOTE(shadower): Yes, we set both hostname and IP to the private
        # IP address for each node. OpenStack doesn't resolve nodes by
        # name at all, so using a hostname here would require an internal
        # DNS which would complicate the setup and potentially introduce
        # performance issues.
        hostvars['openshift_hostname'] = server.metadata.get(
            'openshift_hostname', server.private_v4)
    hostvars['openshift_public_hostname'] = server.name

    if server.metadata['host-type'] == 'cns':
        hostvars['glusterfs_devices'] = ['/dev/nvme0n1']

    group_name = server.metadata.get('openshift_node_group_name')
    hostvars['openshift_node_group_name'] = group_name

    # check for attached docker storage volumes
    if 'os-extended-volumes:volumes_attached' in server:
        if server.id in docker_storage_mountpoints:
            hostvars['docker_storage_mountpoints'] = ' '.join(
                docker_storage_mountpoints[server.id])
    return hostvars


def build_inventory():
    '''Build the dynamic inventory.'''
    cloud = shade.openstack_cloud()

    # Use an environment variable to optionally skip returning the app nodes.
    show_compute_nodes = os.environ.get('OPENSTACK_SHOW_COMPUTE_NODES', 'true').lower() == "true"

    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
    # environment variable.
    cluster_hosts = [
        server for server in cloud.list_servers()
        if 'metadata' in server and 'clusterid' in server.metadata and
        (show_compute_nodes or server.metadata.get('sub-host-type') != 'app')]

    inventory = base_openshift_inventory(cluster_hosts)

    inventory['_meta'] = {'hostvars': {}}

    # Some clouds don't have Cinder. That's okay:
    try:
        volumes = cloud.list_volumes()
    except EndpointNotFound:
        volumes = []

    # cinder volumes used for docker storage
    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)

    for server in cluster_hosts:
        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
            server,
            docker_storage_mountpoints)

    stout = _get_stack_outputs(cloud)
    if stout is not None:
        try:
            inventory['localhost'].update({
                'openshift_openstack_api_lb_provider':
                stout['api_lb_provider'],
                'openshift_openstack_api_lb_port_id':
                stout['api_lb_vip_port_id'],
                'openshift_openstack_api_lb_sg_id':
                stout['api_lb_sg_id']})
        except KeyError:
            pass  # Not an API load balanced deployment

        try:
            inventory['OSEv3']['vars'][
                'openshift_master_cluster_hostname'] = stout['private_api_ip']
        except KeyError:
            pass  # Internal LB not specified

        inventory['localhost']['openshift_openstack_private_api_ip'] = \
            stout.get('private_api_ip')
        inventory['localhost']['openshift_openstack_public_api_ip'] = \
            stout.get('public_api_ip')
        inventory['localhost']['openshift_openstack_public_router_ip'] = \
            stout.get('public_router_ip')

        try:
            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
        except KeyError:
            pass  # Not a kuryr deployment

    return inventory
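
# For reference, an abbreviated and purely illustrative shape of the inventory
# that build_inventory() returns (and that `--list` prints as JSON):
#
#   {
#       "OSEv3": {"hosts": ["master-0", "infra-node-0", "app-node-0"], "vars": {}},
#       "openstack_master_nodes": {"hosts": ["master-0"]},
#       "lb": {"hosts": []},
#       "localhost": {"ansible_connection": "local"},
#       "_meta": {"hostvars": {"master-0": {"ansible_host": "10.0.0.5", ...}}}
#   }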


def _get_stack_outputs(cloud_client):
    """Returns a dictionary with the stack outputs"""
    cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')

    stack = cloud_client.get_stack(cluster_name)
    if stack is None or stack['stack_status'] not in (
            'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
        return None

    data = {}
    for output in stack['outputs']:
        data[output['output_key']] = output['output_value']
    return data


def _get_kuryr_vars(cloud_client, data):
    """Returns a dictionary of Kuryr variables derived from the Heat stack outputs"""
    settings = {}
    settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
    if 'pod_subnet_pool' in data:
        settings['kuryr_openstack_pod_subnet_pool_id'] = data[
            'pod_subnet_pool']
    settings['kuryr_openstack_pod_router_id'] = data['pod_router']
    settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
    settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
    settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
    settings['kuryr_openstack_pod_project_id'] = (
        cloud_client.current_project_id)
    settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']

    settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
    settings['kuryr_openstack_username'] = cloud_client.auth['username']
    settings['kuryr_openstack_password'] = cloud_client.auth['password']
    if 'user_domain_id' in cloud_client.auth:
        settings['kuryr_openstack_user_domain_name'] = (
            cloud_client.auth['user_domain_id'])
    else:
        settings['kuryr_openstack_user_domain_name'] = (
            cloud_client.auth['user_domain_name'])
    # FIXME(apuimedo): consolidate kuryr controller credentials into the same
    #                  vars the openstack playbook uses.
    settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
    if 'project_domain_id' in cloud_client.auth:
        settings['kuryr_openstack_project_domain_name'] = (
            cloud_client.auth['project_domain_id'])
    else:
        settings['kuryr_openstack_project_domain_name'] = (
            cloud_client.auth['project_domain_name'])
    return settings


def output_inventory(inventory, output_file):
    """Outputs inventory into a file in ini format"""
    config = ConfigParser.ConfigParser(allow_no_value=True)

    host_meta_vars = _get_host_meta_vars_as_dict(inventory)

    for key in sorted(inventory.keys()):
        if key == 'localhost':
            config.add_section('localhost')
            config.set('localhost', 'localhost')
            config.add_section('localhost:vars')
            for var, value in inventory['localhost'].items():
                config.set('localhost:vars', var, value)
        elif key not in ('localhost', '_meta'):
            if 'hosts' in inventory[key]:
                config.add_section(key)
                for host in inventory[key]['hosts']:
                    if host in host_meta_vars.keys():
                        config.set(key, host + " " + host_meta_vars[host])
                    else:
                        config.set(key, host)
            if 'vars' in inventory[key]:
                config.add_section(key + ":vars")
                for var, value in inventory[key]['vars'].items():
                    config.set(key + ":vars", var, value)

    with open(output_file, 'w') as configfile:
        config.write(configfile)


def _get_host_meta_vars_as_dict(inventory):
    """parse host meta vars from inventory as dict"""
    host_meta_vars = {}
    if '_meta' in inventory.keys():
        if 'hostvars' in inventory['_meta']:
            for host in inventory['_meta']['hostvars'].keys():
                host_meta_vars[host] = ' '.join(
                    '{}={}'.format(key, val) for key, val in inventory['_meta']['hostvars'][host].items())
    return host_meta_vars


def parse_args():
    """parse arguments to script"""
    parser = argparse.ArgumentParser(description="Create ansible inventory.")
    parser.add_argument('--static', type=str, default='',
                        help='File to store a static inventory in.')
    parser.add_argument('--list', action="store_true", default=False,
                        help='List inventory.')

    return parser.parse_args()


if __name__ == '__main__':
    if parse_args().static:
        output_inventory(build_inventory(), parse_args().static)
    else:
        print(json.dumps(build_inventory(), indent=4, sort_keys=True))
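
# Typical invocations (file and playbook names below are illustrative):
#
#   ./inventory.py --list                  # print the inventory as JSON
#   ./inventory.py --static inventory.ini  # write a static ini-style inventory
#   ansible-playbook -i inventory.py playbook.yml   # use as a dynamic inventory
#                                                   # (the script must be executable)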