inventory.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293
  1. #!/usr/bin/env python
  2. """
  3. This is an Ansible dynamic inventory for OpenStack.
  4. It requires your OpenStack credentials to be set in clouds.yaml or your shell
  5. environment.
  6. """
  7. from __future__ import print_function
  8. import argparse
  9. import json
  10. import os
  11. try:
  12. import ConfigParser
  13. except ImportError:
  14. import configparser as ConfigParser
  15. from keystoneauth1.exceptions.catalog import EndpointNotFound
  16. import shade
  17. def base_openshift_inventory(cluster_hosts):
  18. '''Set the base openshift inventory.'''
  19. inventory = {}
  20. masters = [server.name for server in cluster_hosts
  21. if server.metadata['host-type'] == 'master']
  22. etcd = [server.name for server in cluster_hosts
  23. if server.metadata['host-type'] == 'etcd']
  24. if not etcd:
  25. etcd = masters
  26. infra_hosts = [server.name for server in cluster_hosts
  27. if server.metadata['host-type'] == 'node' and
  28. server.metadata['sub-host-type'] == 'infra']
  29. app = [server.name for server in cluster_hosts
  30. if server.metadata['host-type'] == 'node' and
  31. server.metadata['sub-host-type'] == 'app']
  32. cns = [server.name for server in cluster_hosts
  33. if server.metadata['host-type'] == 'cns']
  34. load_balancers = [server.name for server in cluster_hosts
  35. if server.metadata['host-type'] == 'lb']
  36. # NOTE: everything that should go to the `[nodes]` group:
  37. nodes = list(set(masters + etcd + infra_hosts + app + cns))
  38. # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
  39. osev3 = list(set(nodes + load_balancers))
  40. inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
  41. inventory['openstack_nodes'] = {'hosts': nodes}
  42. inventory['openstack_master_nodes'] = {'hosts': masters}
  43. inventory['openstack_etcd_nodes'] = {'hosts': etcd}
  44. inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
  45. inventory['openstack_compute_nodes'] = {'hosts': app}
  46. inventory['openstack_cns_nodes'] = {'hosts': cns}
  47. inventory['lb'] = {'hosts': load_balancers}
  48. inventory['localhost'] = {'ansible_connection': 'local'}
  49. return inventory
  50. def get_docker_storage_mountpoints(volumes):
  51. '''Check volumes to see if they're being used for docker storage'''
  52. docker_storage_mountpoints = {}
  53. for volume in volumes:
  54. if volume.metadata.get('purpose') == "openshift_docker_storage":
  55. for attachment in volume.attachments:
  56. if attachment.server_id in docker_storage_mountpoints:
  57. docker_storage_mountpoints[attachment.server_id].append(attachment.device)
  58. else:
  59. docker_storage_mountpoints[attachment.server_id] = [attachment.device]
  60. return docker_storage_mountpoints
  61. def _get_hostvars(server, docker_storage_mountpoints):
  62. ssh_ip_address = server.public_v4 or server.private_v4
  63. hostvars = {
  64. 'ansible_host': ssh_ip_address
  65. }
  66. public_v4 = server.public_v4 or server.private_v4
  67. if public_v4:
  68. hostvars['public_v4'] = server.public_v4
  69. hostvars['openshift_public_ip'] = server.public_v4
  70. # TODO(shadower): what about multiple networks?
  71. if server.private_v4:
  72. hostvars['private_v4'] = server.private_v4
  73. hostvars['openshift_ip'] = server.private_v4
  74. # NOTE(shadower): Yes, we set both hostname and IP to the private
  75. # IP address for each node. OpenStack doesn't resolve nodes by
  76. # name at all, so using a hostname here would require an internal
  77. # DNS which would complicate the setup and potentially introduce
  78. # performance issues.
  79. hostvars['openshift_hostname'] = server.metadata.get(
  80. 'openshift_hostname', server.private_v4)
  81. hostvars['openshift_public_hostname'] = server.name
  82. if server.metadata['host-type'] == 'cns':
  83. hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
  84. group_name = server.metadata.get('openshift_node_group_name')
  85. hostvars['openshift_node_group_name'] = group_name
  86. # check for attached docker storage volumes
  87. if 'os-extended-volumes:volumes_attached' in server:
  88. if server.id in docker_storage_mountpoints:
  89. hostvars['docker_storage_mountpoints'] = ' '.join(
  90. docker_storage_mountpoints[server.id])
  91. return hostvars
def build_inventory():
    '''Build the dynamic inventory.

    Queries the OpenStack cloud (via shade) for the cluster's servers,
    Cinder volumes and Heat stack outputs, and returns the assembled
    Ansible inventory dict.
    '''
    cloud = shade.openstack_cloud()

    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
    # environment variable.
    # Only servers tagged with a `clusterid` metadata entry belong to us.
    cluster_hosts = [
        server for server in cloud.list_servers()
        if 'metadata' in server and 'clusterid' in server.metadata]

    inventory = base_openshift_inventory(cluster_hosts)

    inventory['_meta'] = {'hostvars': {}}

    # Some clouds don't have Cinder. That's okay:
    try:
        volumes = cloud.list_volumes()
    except EndpointNotFound:
        volumes = []

    # cinder volumes used for docker storage
    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
    for server in cluster_hosts:
        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
            server,
            docker_storage_mountpoints)

    # Heat stack outputs feed the load-balancer and Kuryr settings below;
    # None means the stack is absent or not successfully completed.
    stout = _get_stack_outputs(cloud)
    if stout is not None:
        try:
            inventory['localhost'].update({
                'openshift_openstack_api_lb_provider':
                stout['api_lb_provider'],
                'openshift_openstack_api_lb_port_id':
                stout['api_lb_vip_port_id'],
                'openshift_openstack_api_lb_sg_id':
                stout['api_lb_sg_id']})
        except KeyError:
            pass # Not an API load balanced deployment

        try:
            inventory['OSEv3']['vars'][
                'openshift_master_cluster_hostname'] = stout['private_api_ip']
        except KeyError:
            pass # Internal LB not specified

        inventory['localhost']['openshift_openstack_private_api_ip'] = \
            stout.get('private_api_ip')
        inventory['localhost']['openshift_openstack_public_api_ip'] = \
            stout.get('public_api_ip')
        inventory['localhost']['openshift_openstack_public_router_ip'] = \
            stout.get('public_router_ip')

        # NOTE(review): on Kuryr deployments this *replaces* the whole
        # OSEv3 vars dict, discarding openshift_master_cluster_hostname
        # set just above — presumably intentional for Kuryr; verify.
        try:
            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
        except KeyError:
            pass # Not a kuryr deployment
    return inventory
  141. def _get_stack_outputs(cloud_client):
  142. """Returns a dictionary with the stack outputs"""
  143. cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
  144. stack = cloud_client.get_stack(cluster_name)
  145. if stack is None or stack['stack_status'] not in (
  146. 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
  147. return None
  148. data = {}
  149. for output in stack['outputs']:
  150. data[output['output_key']] = output['output_value']
  151. return data
  152. def _get_kuryr_vars(cloud_client, data):
  153. """Returns a dictionary of Kuryr variables resulting of heat stacking"""
  154. settings = {}
  155. settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
  156. if 'pod_subnet_pool' in data:
  157. settings['kuryr_openstack_pod_subnet_pool_id'] = data[
  158. 'pod_subnet_pool']
  159. if 'sg_allow_from_default' in data:
  160. settings['kuryr_openstack_sg_allow_from_default_id'] = data[
  161. 'sg_allow_from_default']
  162. if 'sg_allow_from_namespace' in data:
  163. settings['kuryr_openstack_sg_allow_from_namespace_id'] = data[
  164. 'sg_allow_from_namespace']
  165. settings['kuryr_openstack_pod_router_id'] = data['pod_router']
  166. settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
  167. settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
  168. settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
  169. settings['kuryr_openstack_pod_project_id'] = (
  170. cloud_client.current_project_id)
  171. settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
  172. settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
  173. settings['kuryr_openstack_username'] = cloud_client.auth['username']
  174. settings['kuryr_openstack_password'] = cloud_client.auth['password']
  175. if 'user_domain_id' in cloud_client.auth:
  176. settings['kuryr_openstack_user_domain_name'] = (
  177. cloud_client.auth['user_domain_id'])
  178. else:
  179. settings['kuryr_openstack_user_domain_name'] = (
  180. cloud_client.auth['user_domain_name'])
  181. # FIXME(apuimedo): consolidate kuryr controller credentials into the same
  182. # vars the openstack playbook uses.
  183. settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
  184. if 'project_domain_id' in cloud_client.auth:
  185. settings['kuryr_openstack_project_domain_name'] = (
  186. cloud_client.auth['project_domain_id'])
  187. else:
  188. settings['kuryr_openstack_project_domain_name'] = (
  189. cloud_client.auth['project_domain_name'])
  190. return settings
  191. def output_inventory(inventory, output_file):
  192. """Outputs inventory into a file in ini format"""
  193. config = ConfigParser.ConfigParser(allow_no_value=True)
  194. host_meta_vars = _get_host_meta_vars_as_dict(inventory)
  195. for key in sorted(inventory.keys()):
  196. if key == 'localhost':
  197. config.add_section('localhost')
  198. config.set('localhost', 'localhost')
  199. config.add_section('localhost:vars')
  200. for var, value in inventory['localhost'].items():
  201. config.set('localhost:vars', var, value)
  202. elif key not in ('localhost', '_meta'):
  203. if 'hosts' in inventory[key]:
  204. config.add_section(key)
  205. for host in inventory[key]['hosts']:
  206. if host in host_meta_vars.keys():
  207. config.set(key, host + " " + host_meta_vars[host])
  208. else:
  209. config.set(key, host)
  210. if 'vars' in inventory[key]:
  211. config.add_section(key + ":vars")
  212. for var, value in inventory[key]['vars'].items():
  213. config.set(key + ":vars", var, value)
  214. with open(output_file, 'w') as configfile:
  215. config.write(configfile)
  216. def _get_host_meta_vars_as_dict(inventory):
  217. """parse host meta vars from inventory as dict"""
  218. host_meta_vars = {}
  219. if '_meta' in inventory.keys():
  220. if 'hostvars' in inventory['_meta']:
  221. for host in inventory['_meta']['hostvars'].keys():
  222. host_meta_vars[host] = ' '.join(
  223. '{}={}'.format(key, val) for key, val in inventory['_meta']['hostvars'][host].items())
  224. return host_meta_vars
  225. def parse_args():
  226. """parse arguments to script"""
  227. parser = argparse.ArgumentParser(description="Create ansible inventory.")
  228. parser.add_argument('--static', type=str, default='',
  229. help='File to store a static inventory in.')
  230. parser.add_argument('--list', action="store_true", default=False,
  231. help='List inventory.')
  232. return parser.parse_args()
  233. if __name__ == '__main__':
  234. if parse_args().static:
  235. output_inventory(build_inventory(), parse_args().static)
  236. else:
  237. print(json.dumps(build_inventory(), indent=4, sort_keys=True))