
Merge pull request #6319 from celebdor/openstack_configure_kuryr

Automatic merge from submit-queue.

Kuryr var generation in the OpenStack dynamic inventory

This PR adds the necessary network resources for Kuryr to the Heat
stack, including an L4 load balancer in front of the API.

It then uses those resources to automatically generate the OSEv3 variables
needed by the existing kuryr role.

Change-Id: I6fc4c6bc9835217334db1289987daf358dcf287b
Signed-off-by: Antoni Segura Puimedon <antonisp@celebdor.com>
Committed by OpenShift Merge Robot 7 years ago (commit 74ef4d93da)
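
For reference, a rough sketch of the extra inventory sections this change emits for a Kuryr-enabled stack. The keys match the inventory.py diff below; all values here are illustrative placeholders:

```python
# Illustrative shape only: keys come from inventory.py in this PR,
# values are placeholders, and the usual host groups are elided.
{
    'localhost': {
        'ansible_connection': 'local',
        'openshift_openstack_api_lb_provider': 'haproxy',  # or e.g. 'octavia'
        'openshift_openstack_api_lb_port_id': '<vip-port-uuid>',
        'openshift_openstack_api_lb_sg_id': '<security-group-uuid>',
    },
    'OSEv3': {
        'vars': {
            'kuryr_openstack_pod_subnet_id': '<pod-subnet-uuid>',
            'kuryr_openstack_worker_nodes_subnet_id': '<vm-subnet-uuid>',
            'kuryr_openstack_service_subnet_id': '<service-subnet-uuid>',
            'kuryr_openstack_pod_sg_id': '<pod-access-sg-uuid>',
            # ...plus the Keystone credentials mirrored from the
            # shade cloud config (auth_url, username, password, domains).
        },
    },
}
```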

+ 12 - 0
playbooks/openstack/README.md

@@ -144,7 +144,19 @@ $ vi inventory/group_vars/all.yml
 4. Set the `openshift_openstack_default_flavor` to the flavor you want your
    OpenShift VMs to use.
    - See `openstack flavor list` for the list of available flavors.
+5. If you opt to use Kuryr for networking, make sure you review all the
+   Kuryr options in the file. At the very least, you should uncomment the
+   following:
 
+```bash
+#openshift_use_kuryr: True
+#use_trunk_ports: True
+#openshift_use_openshift_sdn: False
+#os_sdn_network_plugin_name: cni
+#openshift_node_proxy_mode: userspace
+#openshift_hosted_manage_registry: false
+#kuryr_openstack_public_subnet_id: uuid of my public subnet
+```
 
 
 #### OpenShift configuration

+ 114 - 42
playbooks/openstack/inventory.py

@@ -11,6 +11,7 @@ from __future__ import print_function
 
 from collections import Mapping
 import json
+import os
 
 import shade
 
@@ -58,6 +59,7 @@ def base_openshift_inventory(cluster_hosts):
     inventory['glusterfs'] = {'hosts': cns}
     inventory['dns'] = {'hosts': dns}
     inventory['lb'] = {'hosts': load_balancers}
+    inventory['localhost'] = {'ansible_connection': 'local'}
 
     return inventory
 
@@ -75,6 +77,49 @@ def get_docker_storage_mountpoints(volumes):
     return docker_storage_mountpoints
 
 
+def _get_hostvars(server, docker_storage_mountpoints):
+    ssh_ip_address = server.public_v4 or server.private_v4
+    hostvars = {
+        'ansible_host': ssh_ip_address
+    }
+
+    public_v4 = server.public_v4 or server.private_v4
+    if public_v4:
+        hostvars['public_v4'] = server.public_v4
+        hostvars['openshift_public_ip'] = server.public_v4
+    # TODO(shadower): what about multiple networks?
+    if server.private_v4:
+        hostvars['private_v4'] = server.private_v4
+        hostvars['openshift_ip'] = server.private_v4
+
+        # NOTE(shadower): Yes, we set both hostname and IP to the private
+        # IP address for each node. OpenStack doesn't resolve nodes by
+        # name at all, so using a hostname here would require an internal
+        # DNS which would complicate the setup and potentially introduce
+        # performance issues.
+        hostvars['openshift_hostname'] = server.metadata.get(
+            'openshift_hostname', server.private_v4)
+    hostvars['openshift_public_hostname'] = server.name
+
+    if server.metadata['host-type'] == 'cns':
+        hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
+
+    node_labels = server.metadata.get('node_labels')
+    # NOTE(shadower): the node_labels value must be a dict not string
+    if not isinstance(node_labels, Mapping):
+        node_labels = json.loads(node_labels)
+
+    if node_labels:
+        hostvars['openshift_node_labels'] = node_labels
+
+    # check for attached docker storage volumes
+    if 'os-extended-volumes:volumes_attached' in server:
+        if server.id in docker_storage_mountpoints:
+            hostvars['docker_storage_mountpoints'] = ' '.join(
+                docker_storage_mountpoints[server.id])
+    return hostvars
+
+
 def build_inventory():
     '''Build the dynamic inventory.'''
     cloud = shade.openstack_cloud()
@@ -97,51 +142,78 @@ def build_inventory():
     inventory['_meta'] = {'hostvars': {}}
 
     # cinder volumes used for docker storage
-    docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
-
+    docker_storage_mountpoints = get_docker_storage_mountpoints(
+        cloud.list_volumes())
     for server in cluster_hosts:
-        ssh_ip_address = server.public_v4 or server.private_v4
-        hostvars = {
-            'ansible_host': ssh_ip_address
-        }
-
-        public_v4 = server.public_v4 or server.private_v4
-        if public_v4:
-            hostvars['public_v4'] = server.public_v4
-            hostvars['openshift_public_ip'] = server.public_v4
-        # TODO(shadower): what about multiple networks?
-        if server.private_v4:
-            hostvars['private_v4'] = server.private_v4
-            hostvars['openshift_ip'] = server.private_v4
-
-            # NOTE(shadower): Yes, we set both hostname and IP to the private
-            # IP address for each node. OpenStack doesn't resolve nodes by
-            # name at all, so using a hostname here would require an internal
-            # DNS which would complicate the setup and potentially introduce
-            # performance issues.
-            hostvars['openshift_hostname'] = server.metadata.get(
-                'openshift_hostname', server.private_v4)
-        hostvars['openshift_public_hostname'] = server.name
-
-        if server.metadata['host-type'] == 'cns':
-            hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
-
-        node_labels = server.metadata.get('node_labels')
-        # NOTE(shadower): the node_labels value must be a dict not string
-        if not isinstance(node_labels, Mapping):
-            node_labels = json.loads(node_labels)
-
-        if node_labels:
-            hostvars['openshift_node_labels'] = node_labels
-
-        # check for attached docker storage volumes
-        if 'os-extended-volumes:volumes_attached' in server:
-            if server.id in docker_storage_mountpoints:
-                hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
-
-        inventory['_meta']['hostvars'][server.name] = hostvars
+        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
+            server,
+            docker_storage_mountpoints)
+
+    stout = _get_stack_outputs(cloud)
+    if stout is not None:
+        try:
+            inventory['localhost'].update({
+                'openshift_openstack_api_lb_provider':
+                stout['api_lb_provider'],
+                'openshift_openstack_api_lb_port_id':
+                stout['api_lb_vip_port_id'],
+                'openshift_openstack_api_lb_sg_id':
+                stout['api_lb_sg_id']})
+        except KeyError:
+            pass  # Not an API load balanced deployment
+
+        try:
+            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
+        except KeyError:
+            pass  # Not a kuryr deployment
     return inventory
 
 
+def _get_stack_outputs(cloud_client):
+    """Returns a dictionary with the stack outputs"""
+    cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
+
+    stack = cloud_client.get_stack(cluster_name)
+    if stack is None or stack['stack_status'] not in (
+            'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
+        return None
+
+    data = {}
+    for output in stack['outputs']:
+        data[output['output_key']] = output['output_value']
+    return data
+
+
+def _get_kuryr_vars(cloud_client, data):
+    """Returns a dictionary of Kuryr variables derived from the Heat stack outputs"""
+    settings = {}
+    settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
+    settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
+    settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
+    settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
+    settings['kuryr_openstack_pod_project_id'] = (
+        cloud_client.current_project_id)
+
+    settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
+    settings['kuryr_openstack_username'] = cloud_client.auth['username']
+    settings['kuryr_openstack_password'] = cloud_client.auth['password']
+    if 'user_domain_id' in cloud_client.auth:
+        settings['kuryr_openstack_user_domain_name'] = (
+            cloud_client.auth['user_domain_id'])
+    else:
+        settings['kuryr_openstack_user_domain_name'] = (
+            cloud_client.auth['user_domain_name'])
+    # FIXME(apuimedo): consolidate kuryr controller credentials into the same
+    #                  vars the openstack playbook uses.
+    settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
+    if 'project_domain_id' in cloud_client.auth:
+        settings['kuryr_openstack_project_domain_name'] = (
+            cloud_client.auth['project_domain_id'])
+    else:
+        settings['kuryr_openstack_project_domain_name'] = (
+            cloud_client.auth['project_domain_name'])
+    return settings
+
+
 if __name__ == '__main__':
     print(json.dumps(build_inventory(), indent=4, sort_keys=True))
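
A minimal sketch (not part of the PR) of how `_get_kuryr_vars` maps Heat stack outputs to role variables, assuming the script above is importable as `inventory` and substituting a stub for the shade cloud client:

```python
import inventory  # assumes playbooks/openstack/inventory.py is on sys.path


class FakeCloud(object):
    """Stub standing in for the shade openstack_cloud() client."""
    current_project_id = 'project-uuid'
    auth = {
        'auth_url': 'http://keystone.example.com:5000/v3',
        'username': 'kuryr',
        'password': 'secret',
        'user_domain_name': 'Default',
        'project_domain_name': 'Default',
    }


# Fake stack outputs, shaped like _get_stack_outputs() returns them.
outputs = {
    'pod_subnet': 'pod-subnet-uuid',
    'vm_subnet': 'vm-subnet-uuid',
    'service_subnet': 'service-subnet-uuid',
    'pod_access_sg_id': 'pod-access-sg-uuid',
}

kuryr_vars = inventory._get_kuryr_vars(FakeCloud(), outputs)
assert kuryr_vars['kuryr_openstack_worker_nodes_subnet_id'] == 'vm-subnet-uuid'
assert kuryr_vars['kuryr_openstack_user_domain_name'] == 'Default'
```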

+ 3 - 0
playbooks/openstack/openshift-cluster/provision.yml

@@ -2,6 +2,9 @@
 - name: Create the OpenStack resources for cluster installation
   hosts: localhost
   tasks:
+  - name: retrieve cluster name from the environment if present
+    set_fact:
+      openshift_openstack_stack_name: "{{ lookup('env', 'OPENSHIFT_CLUSTER') | ternary(lookup('env', 'OPENSHIFT_CLUSTER'), omit) }}"
   - name: provision cluster
     import_role:
       name: openshift_openstack

+ 44 - 0
playbooks/openstack/sample-inventory/group_vars/all.yml

@@ -20,6 +20,50 @@ openshift_openstack_external_network_name: "public"
 # # NOTE: this is only supported with the Flannel SDN so far
 #openstack_private_data_network_name: "openshift-ansible-{{ openshift_openstack_stack_name }}-data-net"
 
+## Kuryr networking
+# TODO: Allow the user to specify pre-existing subnets for pod and services
+#openshift_openstack_kuryr_service_subnet_cidr: "172.30.0.0/16"
+
+#
+## You can alter the port pooling defaults here
+#kuryr_openstack_enable_pools: True
+#kuryr_openstack_pool_max: 0
+#kuryr_openstack_pool_min: 1
+#kuryr_openstack_pool_batch: 5
+#kuryr_openstack_pool_update_frequency: 20
+#
+## You should set the following if you want to use Kuryr/Neutron as your SDN
+#openshift_use_kuryr: True
+#openshift_use_openshift_sdn: False
+
+# NOTE: you must uncomment these for Kuryr to work properly as well:
+# openshift_master_open_ports:
+# - service: dns tcp
+#   port: 53/tcp
+# - service: dns udp
+#   port: 53/udp
+# openshift_node_open_ports:
+# - service: dns tcp
+#   port: 53/tcp
+# - service: dns udp
+#   port: 53/udp
+
+#use_trunk_ports: True
+#os_sdn_network_plugin_name: cni
+#openshift_node_proxy_mode: userspace
+# # Kuryr needs the pod-based registry (if desired in the cluster) to be
+# # deployed after Kuryr is up and running. This can be done with oadm.
+# # Disable management of the OpenShift Registry
+#openshift_hosted_manage_registry: false
+# # Kuryr needs to know the subnet from which Floating IPs for the
+# # load balancer services will be taken.
+#kuryr_openstack_public_subnet_id: uuid_of_my_fip_subnet
+
+# If your VM images name the ethernet device something other than 'eth0',
+# override this:
+#kuryr_cni_link_interface: eth0
+
+
 ## If you want to use a provider network, set its name here.
 ## NOTE: the `openshift_openstack_external_network_name` and
 ## `openshift_openstack_private_network_name` options will be ignored when using a

+ 8 - 1
roles/kuryr/defaults/main.yaml

@@ -12,17 +12,24 @@ kuryr_openstack_user_domain_name: default
 kuryr_openstack_project_domain_name: default
 
 # Kuryr OpenShift namespace
-kuryr_namespace: kube-system
+kuryr_namespace: openshift-infra
 
 # Whether to run the cni plugin in debug mode
 kuryr_cni_debug: "false"
 
+# Default pod-in-VM link interface
+kuryr_cni_link_interface: eth0
+
 # The version of cni binaries
 cni_version: v0.5.2
 
 # Path to bin dir (where kuryr execs get installed)
 bin_dir: /usr/bin
 
+# Default controller and CNI images
+openshift_openstack_kuryr_controller_image: kuryr/controller:latest
+openshift_openstack_kuryr_cni_image: kuryr/cni:latest
+
 # Path to the cni binaries
 cni_bin_dir: /opt/cni/bin
 

+ 1 - 1
roles/kuryr/templates/cni-daemonset.yaml.j2

@@ -23,7 +23,7 @@ spec:
       serviceAccountName: kuryr-controller
       containers:
       - name: kuryr-cni
-        image: kuryr/cni:latest
+        image: {{ openshift_openstack_kuryr_cni_image }}
         imagePullPolicy: IfNotPresent
         command: [ "cni_ds_init" ]
         env:

+ 3 - 23
roles/kuryr/templates/configmap.yaml.j2

@@ -141,15 +141,7 @@ data:
     # Driver to use for binding and unbinding ports. (string value)
     # Deprecated group/name - [binding]/driver
     #default_driver = kuryr.lib.binding.drivers.veth
-
-    # Drivers to use for binding and unbinding ports. (list value)
-    #enabled_drivers = kuryr.lib.binding.drivers.veth
-
-    # Specifies the name of the Nova instance interface to link the virtual devices
-    # to (only applicable to some binding drivers. (string value)
-    link_iface = eth0
-
-    driver = kuryr.lib.binding.drivers.vlan
+    default_driver = kuryr.lib.binding.drivers.vlan
 
 
     [cni_daemon]
@@ -301,7 +293,7 @@ data:
     # TODO (apuimedo): Remove the duplicated line just after this one once the
     # RDO packaging contains the upstream patch
     worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
-    external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
+    external_svc_subnet = {{ kuryr_openstack_public_subnet_id }}
 
     [pod_vif_nested]
 
@@ -466,21 +458,9 @@ data:
     # From kuryr_kubernetes
     #
 
-    # The name prefix of the veth endpoint put inside the container. (string value)
-    #veth_dst_prefix = eth
-
-    # Driver to use for binding and unbinding ports. (string value)
-    # Deprecated group/name - [binding]/driver
-    #default_driver = kuryr.lib.binding.drivers.veth
-
-    # Drivers to use for binding and unbinding ports. (list value)
-    #enabled_drivers = kuryr.lib.binding.drivers.veth
-
     # Specifies the name of the Nova instance interface to link the virtual devices
     # to (only applicable to some binding drivers. (string value)
-    link_iface = eth0
-
-    driver = kuryr.lib.binding.drivers.vlan
+    link_iface = {{ kuryr_cni_link_interface }}
 
 
     [cni_daemon]

+ 1 - 1
roles/kuryr/templates/controller-deployment.yaml.j2

@@ -19,7 +19,7 @@ spec:
       automountServiceAccountToken: true
       hostNetwork: true
       containers:
-      - image: kuryr/controller:latest
+      - image: {{ openshift_openstack_kuryr_controller_image }}
         imagePullPolicy: IfNotPresent
         name: controller
 {% if kuryr_openstack_enable_pools | default(false) %}

+ 2 - 1
roles/lib_utils/action_plugins/sanity_checks.py

@@ -15,7 +15,8 @@ NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
                    ('openshift_use_flannel', False),
                    ('openshift_use_nuage', False),
                    ('openshift_use_contiv', False),
-                   ('openshift_use_calico', False))
+                   ('openshift_use_calico', False),
+                   ('openshift_use_kuryr', False))
 
 ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
 v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,

+ 5 - 1
roles/openshift_openstack/defaults/main.yml

@@ -53,10 +53,14 @@ openshift_openstack_app_subdomain: "apps"
 
 # heat vars
 openshift_openstack_clusterid: openshift
-openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
+openshift_openstack_stack_name: "openshift-cluster"
 openshift_openstack_subnet_cidr: "192.168.99.0/24"
 openshift_openstack_pool_start: "192.168.99.3"
 openshift_openstack_pool_end: "192.168.99.254"
+openshift_openstack_kuryr_service_subnet_cidr: "172.30.0.0/16"
+openshift_openstack_kuryr_service_pool_start: "172.30.128.1"
+openshift_openstack_kuryr_service_pool_end: "172.30.255.253"
+openshift_openstack_kuryr_pod_subnet_cidr: "10.11.0.0/16"
 openshift_openstack_master_hostname: master
 openshift_openstack_infra_hostname: infra-node
 openshift_openstack_cns_hostname: cns

+ 105 - 0
roles/openshift_openstack/library/os_network_extensions.py

@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2018 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
+
+''' os_network_extensions '''
+import keystoneauth1
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_network_extensions
+short_description: Retrieve OpenStack Networking extension facts
+description:
+    - Retrieves all the available OpenStack Neutron extensions
+notes:
+    - This module creates a new top-level C(openstack_network_extensions) fact
+      which contains a list of supported OpenStack Neutron extensions
+author:
+    - "Antoni Segura Puimedon <antoni@redhat.com>"
+'''
+
+RETURN = '''
+openstack_network_extensions:
+    description: List of the extensions available in the cloud's Neutron service
+    type: list
+    returned: always
+    sample:
+      - agent
+      - router
+      - subnet_allocation
+      - trunk
+'''
+
+
+def main():
+    ''' Main module function '''
+    module = AnsibleModule(argument_spec={}, supports_check_mode=True)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    try:
+        cloud = shade.openstack_cloud()
+    # pylint: disable=broad-except
+    except Exception:
+        module.fail_json(msg='Failed to connect to the cloud')
+
+    try:
+        adapter = keystoneauth1.adapter.Adapter(
+            session=cloud.keystone_session,
+            service_type=cloud.cloud_config.get_service_type('network'),
+            interface=cloud.cloud_config.get_interface('network'),
+            endpoint_override=cloud.cloud_config.get_endpoint('network'),
+            version=cloud.cloud_config.get_api_version('network'))
+    # pylint: disable=broad-except
+    except Exception:
+        module.fail_json(msg='Failed to get an adapter to talk to the Neutron '
+                             'API')
+
+    try:
+        response = adapter.get('/extensions.json')
+    # pylint: disable=broad-except
+    except Exception:
+        module.fail_json(msg='Failed to retrieve Neutron extensions')
+
+    extensions = []
+    try:
+        for ext_record in response.json()['extensions']:
+            extensions.append(ext_record['alias'])
+    # pylint: disable=broad-except
+    except Exception:
+        module.fail_json(msg='Failed to process cloud networking '
+                         'extensions')
+
+    module.exit_json(
+        changed=False,
+        ansible_facts={'openstack_network_extensions': extensions})
+
+
+if __name__ == '__main__':
+    main()
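
Outside of Ansible, the query this module performs can be sketched as follows (assuming shade and keystoneauth1 are installed and the usual OS_* credentials are set in the environment):

```python
import keystoneauth1.adapter
import shade

cloud = shade.openstack_cloud()

# Build an adapter against the Networking endpoint, as the module does.
adapter = keystoneauth1.adapter.Adapter(
    session=cloud.keystone_session,
    service_type=cloud.cloud_config.get_service_type('network'),
    interface=cloud.cloud_config.get_interface('network'),
    endpoint_override=cloud.cloud_config.get_endpoint('network'),
    version=cloud.cloud_config.get_api_version('network'))

# Neutron lists its extensions at /extensions.json; keep only the aliases.
aliases = [ext['alias']
           for ext in adapter.get('/extensions.json').json()['extensions']]
print('trunk' in aliases, 'lbaasv2' in aliases)  # what the checks below test
```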

+ 16 - 0
roles/openshift_openstack/tasks/check-prerequisites.yml

@@ -17,6 +17,22 @@
     that: 'shade_result.rc == 0'
     msg: "Python module shade is not installed"
 
+# Gather Neutron extension facts
+- name: Gather the Neutron extension facts
+  os_network_extensions:
+
+# Check trunk support
+- fail:
+    msg: "Trunk ports enabled but support lacking in Neutron"
+  when: (use_trunk_ports | default(False)) and
+        ('trunk' not in openstack_network_extensions)
+
+# Check lbaasv2 support
+- fail:
+    msg: "Kuryr enabled but lacking required lbaasv2 support in Neutron"
+  when: (openshift_use_kuryr | default(False)) and
+        ('lbaasv2' not in openstack_network_extensions)
+
 # Check jmespath
 - name: Try to import python module jmespath
   command: python -c "import jmespath"

+ 0 - 1
roles/openshift_openstack/tasks/node-network.yml

@@ -15,5 +15,4 @@
     name: NetworkManager
     state: restarted
     enabled: yes
-
 # TODO(shadower): add the flannel interface tasks from post-provision-openstack.yml

+ 9 - 0
roles/openshift_openstack/tasks/provision.yml

@@ -84,6 +84,15 @@
 - name: Add the new nodes to the inventory
   meta: refresh_inventory
 
+- name: Correct the OpenShift API load balancer security group (legacy LBaaSv2)
+  os_port:
+    state: present
+    name: "{{ openshift_openstack_api_lb_port_id }}"
+    security_groups:
+    - "{{ openshift_openstack_api_lb_sg_id }}"
+  when:
+  - openshift_openstack_api_lb_provider|default(None) == "haproxy"
+
 - name: CleanUp
   include_tasks: cleanup.yml
   when:

+ 181 - 2
roles/openshift_openstack/templates/heat_stack.yaml.j2

@@ -54,12 +54,130 @@ outputs:
     description: Floating IPs of the nodes
     value: { get_attr: [ infra_nodes, floating_ip ] }
 
+{% if openshift_use_kuryr|default(false)|bool %}
+  vm_subnet:
+    description: ID of the subnet the VMs will be on
+    value: { get_resource: subnet }
+
+  pod_subnet:
+    description: ID of the subnet the Pods will be on
+    value: { get_resource: pod_subnet }
+
+  service_subnet:
+    description: ID of the subnet the services will be on
+    value: { get_resource: service_subnet }
+
+  pod_access_sg_id:
+    description: ID of the security group that allows services to reach pods
+    value: { get_resource: pod_access_sg }
+
+  api_lb_vip_port_id:
+    description: ID of the OpenShift API load balancer VIP port
+    value: { get_attr: [api_lb, vip_port_id] }
+
+  api_lb_sg_id:
+    description: Security group ID of the OpenShift API load balancer VIP port
+    value: { get_resource: lb-secgrp }
+
+  api_lb_provider:
+    description: Provider (driver) of the OpenShift API load balancer
+    value: { get_attr: [api_lb, show, provider] }
+{% endif %}
+
 conditions:
   no_floating: {% if openshift_openstack_provider_network_name %}true{% else %}false{% endif %}
 
 resources:
 
 {% if not openshift_openstack_provider_network_name %}
+{% if openshift_use_kuryr|default(false)|bool %}
+  api_lb:
+    type: OS::Neutron::LBaaS::LoadBalancer
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-api-lb
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+      vip_address: {{ openshift_openstack_kuryr_service_subnet_cidr | ipaddr('1') | ipaddr('address') }}
+      vip_subnet: { get_resource: service_subnet }
+
+  api_lb_listener:
+    type: OS::Neutron::LBaaS::Listener
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-api-lb-listener
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+      loadbalancer: { get_resource: api_lb }
+      protocol: HTTPS
+      protocol_port: 443
+
+  api_lb_pool:
+    type: OS::Neutron::LBaaS::Pool
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-api-lb-pool
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+      protocol: HTTPS
+      lb_algorithm: ROUND_ROBIN
+      listener: { get_resource: api_lb_listener }
+
+  pod_net:
+    type: OS::Neutron::Net
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-pod-net
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+
+  pod_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: { get_resource: pod_net }
+      cidr: {{ openshift_openstack_kuryr_pod_subnet_cidr }}
+      enable_dhcp: False
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-pod-subnet
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+      dns_nameservers:
+{% for nameserver in openshift_openstack_dns_nameservers %}
+        - {{ nameserver }}
+{% endfor %}
+
+  service_net:
+    type: OS::Neutron::Net
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-service-net
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+
+  service_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network_id: { get_resource: service_net }
+      cidr: {{ openshift_openstack_kuryr_service_subnet_cidr }}
+      gateway_ip: {{ openshift_openstack_kuryr_service_subnet_cidr | ipaddr('-2') | ipaddr('address') }}
+      enable_dhcp: False
+      allocation_pools:
+        - start: {{ openshift_openstack_kuryr_service_pool_start }}
+          end: {{ openshift_openstack_kuryr_service_pool_end }}
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-service-subnet
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+
+{% endif %}
+
   net:
     type: OS::Neutron::Net
     properties:
@@ -120,6 +238,33 @@ resources:
       router_id: { get_resource: router }
       subnet_id: { get_resource: subnet }
 
+{% if openshift_use_kuryr|default(false)|bool %}
+  pod_subnet_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      subnet_id: { get_resource: pod_subnet }
+
+  service_router_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: service_net }
+      fixed_ips:
+        - subnet: { get_resource: service_subnet }
+          ip_address: {{ openshift_openstack_kuryr_service_subnet_cidr | ipaddr('-2') | ipaddr('address') }}
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-service-subnet-router-port
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+
+  service_subnet_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      port: { get_resource: service_router_port }
+{% endif %}
+
 {% endif %}
 
 #  keypair:
@@ -155,6 +300,25 @@ resources:
           protocol: icmp
           remote_ip_prefix: {{ openshift_openstack_ssh_ingress_cidr }}
 
+{% if openshift_use_kuryr|default(false)|bool %}
+  pod_access_sg:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name:
+        str_replace:
+          template: openshift-ansible-cluster_id-pod-service-secgrp
+          params:
+            cluster_id: {{ openshift_openstack_stack_name }}
+      description: Give services and nodes access to the pods
+      rules:
+      - ethertype: IPv4
+        remote_ip_prefix: {{ openshift_openstack_kuryr_service_subnet_cidr }}
+      - ethertype: IPv4
+        remote_ip_prefix: {{ openshift_openstack_subnet_cidr }}
+      - ethertype: IPv4
+        remote_mode: remote_group_id
+{% endif %}
+
 {% if openshift_openstack_flat_secgrp|default(False)|bool %}
   flat-secgrp:
     type: OS::Neutron::SecurityGroup
@@ -352,6 +516,15 @@ resources:
           params:
             cluster_id: {{ openshift_openstack_stack_name }}
       rules:
+        # NOTE(shadower): the port 53 (DNS) rules are needed for Kuryr
+        - direction: ingress
+          protocol: tcp
+          port_range_min: 53
+          port_range_max: 53
+        - direction: ingress
+          protocol: udp
+          port_range_min: 53
+          port_range_max: 53
         - direction: ingress
           protocol: tcp
           port_range_min: 10250
@@ -451,7 +624,6 @@ resources:
           port_range_min: 49152
           port_range_max: 49251
 
-{% if openshift_openstack_num_masters|int > 1 %}
   lb-secgrp:
     type: OS::Neutron::SecurityGroup
     properties:
@@ -460,6 +632,11 @@ resources:
       rules:
       - direction: ingress
         protocol: tcp
+        port_range_min: 443
+        port_range_max: 443
+        remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
+      - direction: ingress
+        protocol: tcp
         port_range_min: {{ openshift_master_api_port | default(8443) }}
         port_range_max: {{ openshift_master_api_port | default(8443) }}
         remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
@@ -470,7 +647,6 @@ resources:
         port_range_max: {{ openshift_master_console_port | default(8443) }}
         remote_ip_prefix: {{ openshift_openstack_lb_ingress_cidr }}
 {% endif %}
-{% endif %}
 
   etcd:
     type: OS::Heat::ResourceGroup
@@ -638,6 +814,9 @@ resources:
           data_subnet: { get_resource: data_subnet }
 {% endif %}
 {% endif %}
+{% if openshift_use_kuryr|default(false)|bool %}
+          api_lb_pool: { get_resource: api_lb_pool }
+{% endif %}
           secgrp:
 {% if openshift_openstack_flat_secgrp|default(False)|bool %}
             - { get_resource: flat-secgrp }

+ 23 - 0
roles/openshift_openstack/templates/heat_stack_server.yaml.j2

@@ -95,6 +95,14 @@ parameters:
     label: Security groups
     description: Security group resources
 
+{% if openshift_use_kuryr|default(false)|bool %}
+  api_lb_pool:
+    default: ''
+    type: string
+    label: API LoadBalancer pool ID
+    description: API LoadBalancer pool resource
+{% endif %}
+
   attach_float_net:
     type: boolean
     default: true
@@ -270,3 +278,18 @@ resources:
       volume_id: { get_resource: cinder_volume }
       instance_uuid: { get_resource: server }
 {% endif %}
+
+
+{% if openshift_use_kuryr|default(false)|bool %}
+  lb_member:
+    type: OS::Neutron::LBaaS::PoolMember
+    condition:
+      equals:
+        - get_param: type
+        - master
+    properties:
+      pool: { get_param: api_lb_pool }
+      protocol_port: {{ openshift_master_api_port|default(8443) }}
+      address: { get_attr: [server, first_address]}
+      subnet: { get_param: subnet }
+{% endif %}