resources.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295
  1. #!/usr/bin/env python
  2. """
  3. This library is used by the OpenStack's dynamic inventories.
  4. It produces the inventory in a Python dict structure based on the current
  5. environment.
  6. """
  7. from __future__ import print_function
  8. import argparse
  9. import json
  10. import os
  11. try:
  12. import ConfigParser
  13. except ImportError:
  14. import configparser as ConfigParser
  15. from keystoneauth1.exceptions.catalog import EndpointNotFound
  16. import shade
  17. OPENSHIFT_CLUSTER = os.getenv('OPENSHIFT_CLUSTER')
  18. def base_openshift_inventory(cluster_hosts):
  19. '''Set the base openshift inventory.'''
  20. inventory = {}
  21. masters = [server.name for server in cluster_hosts
  22. if server.metadata['host-type'] == 'master']
  23. etcd = [server.name for server in cluster_hosts
  24. if server.metadata['host-type'] == 'etcd']
  25. if not etcd:
  26. etcd = masters
  27. infra_hosts = [server.name for server in cluster_hosts
  28. if server.metadata['host-type'] == 'node' and
  29. server.metadata['sub-host-type'] == 'infra']
  30. app = [server.name for server in cluster_hosts
  31. if server.metadata['host-type'] == 'node' and
  32. server.metadata['sub-host-type'] == 'app']
  33. cns = [server.name for server in cluster_hosts
  34. if server.metadata['host-type'] == 'cns']
  35. load_balancers = [server.name for server in cluster_hosts
  36. if server.metadata['host-type'] == 'lb']
  37. # NOTE: everything that should go to the `[nodes]` group:
  38. nodes = list(set(masters + infra_hosts + app + cns))
  39. # NOTE: all OpenShift nodes + any "supporting" roles,
  40. # i.e.: `[etcd]`, `[lb]`, `[nfs]`, etc.:
  41. osev3 = list(set(nodes + etcd + load_balancers))
  42. inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
  43. inventory['openstack_nodes'] = {'hosts': nodes}
  44. inventory['openstack_master_nodes'] = {'hosts': masters}
  45. inventory['openstack_etcd_nodes'] = {'hosts': etcd}
  46. inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
  47. inventory['openstack_compute_nodes'] = {'hosts': app}
  48. inventory['openstack_cns_nodes'] = {'hosts': cns}
  49. inventory['lb'] = {'hosts': load_balancers}
  50. inventory['localhost'] = {'ansible_connection': 'local'}
  51. return inventory
  52. def get_docker_storage_mountpoints(volumes):
  53. '''Check volumes to see if they're being used for docker storage'''
  54. docker_storage_mountpoints = {}
  55. for volume in volumes:
  56. if volume.metadata.get('purpose') == "openshift_docker_storage":
  57. for attachment in volume.attachments:
  58. if attachment.server_id in docker_storage_mountpoints:
  59. docker_storage_mountpoints[attachment.server_id].append(attachment.device)
  60. else:
  61. docker_storage_mountpoints[attachment.server_id] = [attachment.device]
  62. return docker_storage_mountpoints
  63. def _get_hostvars(server, docker_storage_mountpoints):
  64. ssh_ip_address = server.public_v4 or server.private_v4
  65. hostvars = {
  66. 'ansible_host': ssh_ip_address
  67. }
  68. public_v4 = server.public_v4 or server.private_v4
  69. private_v4 = server.private_v4 or server.public_v4
  70. if public_v4:
  71. hostvars['public_v4'] = public_v4
  72. hostvars['openshift_public_ip'] = public_v4
  73. # TODO(shadower): what about multiple networks?
  74. if private_v4:
  75. hostvars['private_v4'] = private_v4
  76. hostvars['openshift_ip'] = private_v4
  77. hostvars['openshift_public_hostname'] = server.name
  78. if server.metadata['host-type'] == 'cns':
  79. hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
  80. group_name = server.metadata.get('openshift_node_group_name')
  81. hostvars['openshift_node_group_name'] = group_name
  82. # check for attached docker storage volumes
  83. if 'os-extended-volumes:volumes_attached' in server:
  84. if server.id in docker_storage_mountpoints:
  85. hostvars['docker_storage_mountpoints'] = ' '.join(
  86. docker_storage_mountpoints[server.id])
  87. return hostvars
def build_inventory():
    '''Build the dynamic inventory.

    Queries the OpenStack cloud (via shade) for servers, Cinder volumes
    and the Heat stack outputs, and returns the full Ansible inventory
    dict including per-host `_meta` hostvars.
    '''
    cloud = shade.openstack_cloud()
    # Use an environment variable to optionally skip returning the app nodes.
    show_compute_nodes = os.environ.get('OPENSTACK_SHOW_COMPUTE_NODES', 'true').lower() == "true"
    # If `OPENSHIFT_CLUSTER` env variable is defined then it's used to
    # filter servers by metadata.clusterid attribute value.
    cluster_hosts = [
        server for server in cloud.list_servers()
        if 'clusterid' in server.get('metadata', []) and
        (OPENSHIFT_CLUSTER is None or server.metadata.clusterid == OPENSHIFT_CLUSTER) and
        (show_compute_nodes or server.metadata.get('sub-host-type') != 'app')]
    inventory = base_openshift_inventory(cluster_hosts)
    inventory['_meta'] = {'hostvars': {}}
    # Some clouds don't have Cinder. That's okay:
    try:
        volumes = cloud.list_volumes()
    except EndpointNotFound:
        volumes = []
    # cinder volumes used for docker storage
    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
    for server in cluster_hosts:
        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
            server,
            docker_storage_mountpoints)
    # Heat stack outputs (None when the stack is absent or not complete).
    stout = _get_stack_outputs(cloud)
    if stout is not None:
        try:
            inventory['localhost'].update({
                'openshift_openstack_api_lb_provider':
                stout['api_lb_provider'],
                'openshift_openstack_api_lb_port_id':
                stout['api_lb_vip_port_id'],
                'openshift_openstack_api_lb_sg_id':
                stout['api_lb_sg_id']})
        except KeyError:
            pass  # Not an API load balanced deployment
        try:
            inventory['OSEv3']['vars'][
                'openshift_master_cluster_hostname'] = stout['private_api_ip']
        except KeyError:
            pass  # Internal LB not specified
        # These are best-effort: .get() yields None when the output is absent.
        inventory['localhost']['openshift_openstack_private_api_ip'] = \
            stout.get('private_api_ip')
        inventory['localhost']['openshift_openstack_public_api_ip'] = \
            stout.get('public_api_ip')
        inventory['localhost']['openshift_openstack_public_router_ip'] = \
            stout.get('public_router_ip')
        try:
            # NOTE(review): this assignment REPLACES the whole OSEv3 vars dict
            # (including openshift_master_cluster_hostname set above) on kuryr
            # deployments -- confirm this is intended.
            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
        except KeyError:
            pass  # Not a kuryr deployment
    return inventory
  141. def _get_stack_outputs(cloud_client):
  142. """Returns a dictionary with the stack outputs"""
  143. cluster_name = OPENSHIFT_CLUSTER or 'openshift-cluster'
  144. stack = cloud_client.get_stack(cluster_name)
  145. if stack is None or stack['stack_status'] not in (
  146. 'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
  147. return None
  148. data = {}
  149. for output in stack['outputs']:
  150. data[output['output_key']] = output['output_value']
  151. return data
  152. def _get_kuryr_vars(cloud_client, data):
  153. """Returns a dictionary of Kuryr variables resulting of heat stacking"""
  154. settings = {}
  155. settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
  156. if 'pod_subnet_pool' in data:
  157. settings['kuryr_openstack_pod_subnet_pool_id'] = data[
  158. 'pod_subnet_pool']
  159. if 'sg_allow_from_default' in data:
  160. settings['kuryr_openstack_sg_allow_from_default_id'] = data[
  161. 'sg_allow_from_default']
  162. if 'sg_allow_from_namespace' in data:
  163. settings['kuryr_openstack_sg_allow_from_namespace_id'] = data[
  164. 'sg_allow_from_namespace']
  165. settings['kuryr_openstack_pod_router_id'] = data['pod_router']
  166. settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
  167. settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
  168. settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
  169. settings['kuryr_openstack_pod_project_id'] = (
  170. cloud_client.current_project_id)
  171. settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
  172. settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
  173. settings['kuryr_openstack_username'] = cloud_client.auth['username']
  174. settings['kuryr_openstack_password'] = cloud_client.auth['password']
  175. if 'user_domain_id' in cloud_client.auth:
  176. settings['kuryr_openstack_user_domain_name'] = (
  177. cloud_client.auth['user_domain_id'])
  178. else:
  179. settings['kuryr_openstack_user_domain_name'] = (
  180. cloud_client.auth['user_domain_name'])
  181. # FIXME(apuimedo): consolidate kuryr controller credentials into the same
  182. # vars the openstack playbook uses.
  183. settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
  184. if 'project_domain_id' in cloud_client.auth:
  185. settings['kuryr_openstack_project_domain_name'] = (
  186. cloud_client.auth['project_domain_id'])
  187. else:
  188. settings['kuryr_openstack_project_domain_name'] = (
  189. cloud_client.auth['project_domain_name'])
  190. return settings
  191. def output_inventory(inventory, output_file):
  192. """Outputs inventory into a file in ini format"""
  193. config = ConfigParser.ConfigParser(allow_no_value=True)
  194. host_meta_vars = _get_host_meta_vars_as_dict(inventory)
  195. for key in sorted(inventory.keys()):
  196. if key == 'localhost':
  197. config.add_section('localhost')
  198. config.set('localhost', 'localhost')
  199. config.add_section('localhost:vars')
  200. for var, value in inventory['localhost'].items():
  201. config.set('localhost:vars', var, value)
  202. elif key not in ('localhost', '_meta'):
  203. if 'hosts' in inventory[key]:
  204. config.add_section(key)
  205. for host in inventory[key]['hosts']:
  206. if host in host_meta_vars.keys():
  207. config.set(key, host + " " + host_meta_vars[host])
  208. else:
  209. config.set(key, host)
  210. if 'vars' in inventory[key]:
  211. config.add_section(key + ":vars")
  212. for var, value in inventory[key]['vars'].items():
  213. config.set(key + ":vars", var, value)
  214. with open(output_file, 'w') as configfile:
  215. config.write(configfile)
  216. def _get_host_meta_vars_as_dict(inventory):
  217. """parse host meta vars from inventory as dict"""
  218. host_meta_vars = {}
  219. if '_meta' in inventory.keys():
  220. if 'hostvars' in inventory['_meta']:
  221. for host in inventory['_meta']['hostvars'].keys():
  222. host_meta_vars[host] = ' '.join(
  223. '{}={}'.format(key, val) for key, val in inventory['_meta']['hostvars'][host].items())
  224. return host_meta_vars
  225. def parse_args():
  226. """parse arguments to script"""
  227. parser = argparse.ArgumentParser(description="Create ansible inventory.")
  228. parser.add_argument('--static', type=str, default='',
  229. help='File to store a static inventory in.')
  230. parser.add_argument('--list', action="store_true", default=False,
  231. help='List inventory.')
  232. return parser.parse_args()
  233. def main(inventory_builder):
  234. """Ansible dynamic inventory entry point."""
  235. if parse_args().static:
  236. output_inventory(inventory_builder(), parse_args().static)
  237. else:
  238. print(json.dumps(inventory_builder(), indent=4, sort_keys=True))