resources.py

#!/usr/bin/env python
"""
This library is used by the OpenStack dynamic inventories.
It produces the inventory in a Python dict structure based on the current
environment.
"""

from __future__ import print_function

import argparse
import json
import os

try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

from keystoneauth1.exceptions.catalog import EndpointNotFound

import shade


def base_openshift_inventory(cluster_hosts):
    '''Set the base openshift inventory.'''
    inventory = {}

    masters = [server.name for server in cluster_hosts
               if server.metadata['host-type'] == 'master']

    etcd = [server.name for server in cluster_hosts
            if server.metadata['host-type'] == 'etcd']
    if not etcd:
        etcd = masters

    infra_hosts = [server.name for server in cluster_hosts
                   if server.metadata['host-type'] == 'node' and
                   server.metadata['sub-host-type'] == 'infra']

    app = [server.name for server in cluster_hosts
           if server.metadata['host-type'] == 'node' and
           server.metadata['sub-host-type'] == 'app']

    cns = [server.name for server in cluster_hosts
           if server.metadata['host-type'] == 'cns']

    load_balancers = [server.name for server in cluster_hosts
                      if server.metadata['host-type'] == 'lb']

    # NOTE: everything that should go to the `[nodes]` group:
    nodes = list(set(masters + etcd + infra_hosts + app + cns))

    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
    osev3 = list(set(nodes + load_balancers))

    inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
    inventory['openstack_nodes'] = {'hosts': nodes}
    inventory['openstack_master_nodes'] = {'hosts': masters}
    inventory['openstack_etcd_nodes'] = {'hosts': etcd}
    inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
    inventory['openstack_compute_nodes'] = {'hosts': app}
    inventory['openstack_cns_nodes'] = {'hosts': cns}
    inventory['lb'] = {'hosts': load_balancers}
    inventory['localhost'] = {'ansible_connection': 'local'}

    return inventory
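

# NOTE: a rough sketch of the structure `base_openshift_inventory` returns
# for a cluster with a single master and a single infra node (host names
# below are illustrative only; list ordering is arbitrary because the
# groups are built via `set()`):
#
#   {'OSEv3': {'hosts': ['master-0', 'infra-node-0'], 'vars': {}},
#    'openstack_nodes': {'hosts': ['master-0', 'infra-node-0']},
#    'openstack_master_nodes': {'hosts': ['master-0']},
#    'openstack_etcd_nodes': {'hosts': ['master-0']},
#    'openstack_infra_nodes': {'hosts': ['infra-node-0']},
#    'openstack_compute_nodes': {'hosts': []},
#    'openstack_cns_nodes': {'hosts': []},
#    'lb': {'hosts': []},
#    'localhost': {'ansible_connection': 'local'}}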


def get_docker_storage_mountpoints(volumes):
    '''Check volumes to see if they're being used for docker storage'''
    docker_storage_mountpoints = {}
    for volume in volumes:
        if volume.metadata.get('purpose') == "openshift_docker_storage":
            for attachment in volume.attachments:
                if attachment.server_id in docker_storage_mountpoints:
                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
                else:
                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
    return docker_storage_mountpoints


def _get_hostvars(server, docker_storage_mountpoints):
    ssh_ip_address = server.public_v4 or server.private_v4
    hostvars = {
        'ansible_host': ssh_ip_address
    }

    public_v4 = server.public_v4 or server.private_v4
    private_v4 = server.private_v4 or server.public_v4
    if public_v4:
        hostvars['public_v4'] = public_v4
        hostvars['openshift_public_ip'] = public_v4
    # TODO(shadower): what about multiple networks?
    if private_v4:
        hostvars['private_v4'] = private_v4
        hostvars['openshift_ip'] = private_v4

        # NOTE(shadower): Yes, we set both hostname and IP to the private
        # IP address for each node. OpenStack doesn't resolve nodes by
        # name at all, so using a hostname here would require an internal
        # DNS which would complicate the setup and potentially introduce
        # performance issues.
        hostvars['openshift_hostname'] = server.metadata.get(
            'openshift_hostname', private_v4)
    hostvars['openshift_public_hostname'] = server.name

    if server.metadata['host-type'] == 'cns':
        hostvars['glusterfs_devices'] = ['/dev/nvme0n1']

    group_name = server.metadata.get('openshift_node_group_name')
    hostvars['openshift_node_group_name'] = group_name

    # check for attached docker storage volumes
    if 'os-extended-volumes:volumes_attached' in server:
        if server.id in docker_storage_mountpoints:
            hostvars['docker_storage_mountpoints'] = ' '.join(
                docker_storage_mountpoints[server.id])
    return hostvars
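

# NOTE: a sketch of what `_get_hostvars` might return for a master node with
# a floating IP and no attached docker-storage volumes. The addresses and the
# server name are illustrative; `openshift_node_group_name` is None whenever
# the corresponding metadata key is unset:
#
#   {'ansible_host': '10.0.0.10',
#    'public_v4': '10.0.0.10',
#    'openshift_public_ip': '10.0.0.10',
#    'private_v4': '192.168.0.10',
#    'openshift_ip': '192.168.0.10',
#    'openshift_hostname': '192.168.0.10',
#    'openshift_public_hostname': 'master-0.example.com',
#    'openshift_node_group_name': None}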


def build_inventory():
    '''Build the dynamic inventory.'''
    cloud = shade.openstack_cloud()

    # Use an environment variable to optionally skip returning the app nodes.
    show_compute_nodes = os.environ.get('OPENSTACK_SHOW_COMPUTE_NODES', 'true').lower() == "true"

    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
    # environment variable.
    cluster_hosts = [
        server for server in cloud.list_servers()
        if 'metadata' in server and 'clusterid' in server.metadata and
        (show_compute_nodes or server.metadata.get('sub-host-type') != 'app')]

    inventory = base_openshift_inventory(cluster_hosts)

    inventory['_meta'] = {'hostvars': {}}

    # Some clouds don't have Cinder. That's okay:
    try:
        volumes = cloud.list_volumes()
    except EndpointNotFound:
        volumes = []

    # cinder volumes used for docker storage
    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
    for server in cluster_hosts:
        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
            server,
            docker_storage_mountpoints)

    stout = _get_stack_outputs(cloud)
    if stout is not None:
        try:
            inventory['localhost'].update({
                'openshift_openstack_api_lb_provider':
                stout['api_lb_provider'],
                'openshift_openstack_api_lb_port_id':
                stout['api_lb_vip_port_id'],
                'openshift_openstack_api_lb_sg_id':
                stout['api_lb_sg_id']})
        except KeyError:
            pass  # Not an API load balanced deployment

        try:
            inventory['OSEv3']['vars'][
                'openshift_master_cluster_hostname'] = stout['private_api_ip']
        except KeyError:
            pass  # Internal LB not specified

        inventory['localhost']['openshift_openstack_private_api_ip'] = \
            stout.get('private_api_ip')
        inventory['localhost']['openshift_openstack_public_api_ip'] = \
            stout.get('public_api_ip')
        inventory['localhost']['openshift_openstack_public_router_ip'] = \
            stout.get('public_router_ip')

        try:
            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
        except KeyError:
            pass  # Not a kuryr deployment
    return inventory


def _get_stack_outputs(cloud_client):
    """Returns a dictionary with the stack outputs"""
    cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')

    stack = cloud_client.get_stack(cluster_name)
    if stack is None or stack['stack_status'] not in (
            'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
        return None

    data = {}
    for output in stack['outputs']:
        data[output['output_key']] = output['output_value']
    return data


def _get_kuryr_vars(cloud_client, data):
    """Returns a dictionary of Kuryr variables resulting from the Heat stack"""
    settings = {}
    settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
    if 'pod_subnet_pool' in data:
        settings['kuryr_openstack_pod_subnet_pool_id'] = data[
            'pod_subnet_pool']
    if 'sg_allow_from_default' in data:
        settings['kuryr_openstack_sg_allow_from_default_id'] = data[
            'sg_allow_from_default']
    if 'sg_allow_from_namespace' in data:
        settings['kuryr_openstack_sg_allow_from_namespace_id'] = data[
            'sg_allow_from_namespace']
    settings['kuryr_openstack_pod_router_id'] = data['pod_router']
    settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
    settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
    settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
    settings['kuryr_openstack_pod_project_id'] = (
        cloud_client.current_project_id)
    settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']

    settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
    settings['kuryr_openstack_username'] = cloud_client.auth['username']
    settings['kuryr_openstack_password'] = cloud_client.auth['password']
    if 'user_domain_id' in cloud_client.auth:
        settings['kuryr_openstack_user_domain_name'] = (
            cloud_client.auth['user_domain_id'])
    else:
        settings['kuryr_openstack_user_domain_name'] = (
            cloud_client.auth['user_domain_name'])
    # FIXME(apuimedo): consolidate kuryr controller credentials into the same
    #                  vars the openstack playbook uses.
    settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
    if 'project_domain_id' in cloud_client.auth:
        settings['kuryr_openstack_project_domain_name'] = (
            cloud_client.auth['project_domain_id'])
    else:
        settings['kuryr_openstack_project_domain_name'] = (
            cloud_client.auth['project_domain_name'])
    return settings


def output_inventory(inventory, output_file):
    """Outputs inventory into a file in ini format"""
    config = ConfigParser.ConfigParser(allow_no_value=True)

    host_meta_vars = _get_host_meta_vars_as_dict(inventory)

    for key in sorted(inventory.keys()):
        if key == 'localhost':
            config.add_section('localhost')
            config.set('localhost', 'localhost')
            config.add_section('localhost:vars')
            for var, value in inventory['localhost'].items():
                config.set('localhost:vars', var, value)
        elif key not in ('localhost', '_meta'):
            if 'hosts' in inventory[key]:
                config.add_section(key)
                for host in inventory[key]['hosts']:
                    if host in host_meta_vars.keys():
                        config.set(key, host + " " + host_meta_vars[host])
                    else:
                        config.set(key, host)
            if 'vars' in inventory[key]:
                config.add_section(key + ":vars")
                for var, value in inventory[key]['vars'].items():
                    config.set(key + ":vars", var, value)

    with open(output_file, 'w') as configfile:
        config.write(configfile)
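

# NOTE: a rough sketch of the static ini inventory `output_inventory` writes
# (host name, addresses and groups shown are illustrative; a host line carries
# its `_meta` hostvars inline, while group variables land in `:vars` sections):
#
#   [openstack_master_nodes]
#   master-0 ansible_host=10.0.0.10 private_v4=192.168.0.10
#
#   [localhost:vars]
#   ansible_connection = local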


def _get_host_meta_vars_as_dict(inventory):
    """parse host meta vars from inventory as dict"""
    host_meta_vars = {}
    if '_meta' in inventory.keys():
        if 'hostvars' in inventory['_meta']:
            for host in inventory['_meta']['hostvars'].keys():
                host_meta_vars[host] = ' '.join(
                    '{}={}'.format(key, val)
                    for key, val in inventory['_meta']['hostvars'][host].items())
    return host_meta_vars


def parse_args():
    """parse arguments to script"""
    parser = argparse.ArgumentParser(description="Create ansible inventory.")
    parser.add_argument('--static', type=str, default='',
                        help='File to store a static inventory in.')
    parser.add_argument('--list', action="store_true", default=False,
                        help='List inventory.')
    return parser.parse_args()


def main(inventory_builder):
    """Ansible dynamic inventory entry point."""
    if parse_args().static:
        output_inventory(inventory_builder(), parse_args().static)
    else:
        print(json.dumps(inventory_builder(), indent=4, sort_keys=True))
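

# NOTE: a minimal sketch of how a dynamic inventory script might consume this
# library. The wrapper file itself is an assumption and not part of this
# module; it only relies on `build_inventory` and `main` defined above:
#
#   #!/usr/bin/env python
#   from resources import build_inventory, main
#
#   if __name__ == '__main__':
#       main(build_inventory)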