소스 검색

Merge pull request #9243 from tomassedovic/openstack-master-scaleup

Add OpenStack master and node scaleup
Scott Dodson 6 년 전
부모
커밋
1a975b11f7

+ 37 - 0
playbooks/openstack/configuration.md

@@ -938,6 +938,43 @@ Where `<count>` is the number of the pods you want (i.e. the number of your
 infra nodes).
 
 
+### Scaling the Master Nodes
+
+Adding master nodes is similar to adding compute/infra nodes, but we need to
+run a different playbook at the end.
+
+You must have a fully working OpenShift cluster before you start scaling.
+
+#### 1. Adding Extra Master Nodes
+
+Edit your `inventory/group_vars/all.yml` and set the new master node total in
+`openshift_openstack_num_masters`.
+
+For example, if you started with a single master node and you want to add two
+more (for a grand total of three), you should set:
+
+    openshift_openstack_num_masters: 3
+
+#### 2. Scaling the Cluster
+
+Then run the `master-scaleup.yml` playbook:
+
+```
+$ ansible-playbook --user openshift \
+  -i openshift-ansible/playbooks/openstack/inventory.py \
+  -i inventory \
+  openshift-ansible/playbooks/openstack/openshift-cluster/master-scaleup.yml
+```
+
+This will create the new OpenStack nodes, optionally create the DNS records
+and subscribe them to RHN, configure the `new_masters`, `new_nodes` and
+`new_etcd` groups and run the OpenShift master scaleup tasks.
+
+When the playbook finishes, you should have new master nodes up and running.
+
+Run `oc get nodes` to verify.
+
+
 ## Deploying At Scale
 
 By default, heat stack outputs are resolved.  This may cause

+ 11 - 21
playbooks/openstack/inventory.py

@@ -44,25 +44,22 @@ def base_openshift_inventory(cluster_hosts):
     cns = [server.name for server in cluster_hosts
            if server.metadata['host-type'] == 'cns']
 
-    nodes = list(set(masters + infra_hosts + app + cns))
-
-    dns = [server.name for server in cluster_hosts
-           if server.metadata['host-type'] == 'dns']
-
     load_balancers = [server.name for server in cluster_hosts
                       if server.metadata['host-type'] == 'lb']
 
-    osev3 = list(set(nodes + etcd + load_balancers))
+    # NOTE: everything that should go to the `[nodes]` group:
+    nodes = list(set(masters + etcd + infra_hosts + app + cns))
+
+    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
+    osev3 = list(set(nodes + load_balancers))
 
-    inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
     inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
-    inventory['masters'] = {'hosts': masters}
-    inventory['etcd'] = {'hosts': etcd}
-    inventory['nodes'] = {'hosts': nodes}
-    inventory['infra_hosts'] = {'hosts': infra_hosts}
-    inventory['app'] = {'hosts': app}
-    inventory['glusterfs'] = {'hosts': cns}
-    inventory['dns'] = {'hosts': dns}
+    inventory['openstack_nodes'] = {'hosts': nodes}
+    inventory['openstack_master_nodes'] = {'hosts': masters}
+    inventory['openstack_etcd_nodes'] = {'hosts': etcd}
+    inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
+    inventory['openstack_compute_nodes'] = {'hosts': app}
+    inventory['openstack_cns_nodes'] = {'hosts': cns}
     inventory['lb'] = {'hosts': load_balancers}
     inventory['localhost'] = {'ansible_connection': 'local'}
 
@@ -132,13 +129,6 @@ def build_inventory():
 
     inventory = base_openshift_inventory(cluster_hosts)
 
-    for server in cluster_hosts:
-        if 'group' in server.metadata:
-            group = server.metadata.get('group')
-            if group not in inventory:
-                inventory[group] = {'hosts': []}
-            inventory[group]['hosts'].append(server.name)
-
     inventory['_meta'] = {'hostvars': {}}
 
     # Some clouds don't have Cinder. That's okay:

+ 1 - 1
playbooks/openstack/openshift-cluster/configure-new-nodes.yml

@@ -3,7 +3,7 @@
 # that it runs on the new nodes only.
 - name: Prepare the New Nodes in the cluster for installation
   any_errors_fatal: true
-  hosts: new_nodes:new_masters
+  hosts: new_nodes:new_masters:new_etcd
   become: yes
   gather_facts: yes
   tasks:

+ 136 - 0
playbooks/openstack/openshift-cluster/evaluate_groups.yml

@@ -0,0 +1,136 @@
+---
+# NOTE: to support scaling, the `masters` and `new_masters` (and analogous for
+# `nodes` and `etcd`) groups must be exclusive.
+#
+# Since Ansible cannot remove a node from a group, the dynamic inventory
+# can't e.g. tag all master nodes as part of the `masters` group and then
+# add the new ones to `new_masters`. Creating new hosts means refreshing
+# the inventory and that would mean bringing all the nodes (old and new)
+# into the `masters` group. And that causes the scaleup playbook to fail.
+#
+# And since the playbooks can't pass data to the dynamic inventory, this
+# new/old separation cannot happen there.
+#
+# So the inventory sets e.g. `openstack_master_nodes` and this playbook
+# configures the actual source groups such as `masters`, `new_masters`,
+# `nodes`, `etcd`, etc.
+- name: Evaluate the OpenStack groups
+  any_errors_fatal: true
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  become: no
+  tasks:
+  # This will happen when we're deploying a cluster from scratch. Add all
+  # nodes to the `masters` group.
+  - name: Create a brand new masters group (no scaling)
+    add_host:
+      name: "{{ item }}"
+      groups: masters
+    with_items: "{{ groups.openstack_master_nodes | default([]) }}"
+    changed_when: no
+    when: >
+      openshift_openstack_existing is undefined or
+      openshift_openstack_existing.openstack_master_nodes is undefined or
+      openshift_openstack_existing.openstack_master_nodes | length == 0
+
+  # This will happen when we are scaling an existing cluster. Add the current
+  # nodes to the `masters` group.
+  - name: Create pre-existing masters group
+    add_host:
+      name: "{{ item }}"
+      groups: masters
+    with_items: "{{ openshift_openstack_existing.openstack_master_nodes }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_master_nodes is defined
+    - openshift_openstack_existing.openstack_master_nodes | length > 0
+
+  # This will happen when we are scaling an existing cluster. Add the
+  # newly-created nodes to the `new_masters` group.
+  - name: Create new_masters group
+    add_host:
+      name: "{{ item }}"
+      groups: new_masters
+    with_items: "{{ groups.openstack_master_nodes | default([]) | difference(groups.masters) }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_master_nodes is defined
+    - openshift_openstack_existing.openstack_master_nodes | length > 0
+
+  - name: Create a brand new etcd group (no scaling)
+    add_host:
+      name: "{{ item }}"
+      groups: etcd
+    with_items: "{{ groups.openstack_etcd_nodes | default([]) }}"
+    changed_when: no
+    when: >
+      openshift_openstack_existing is undefined or
+      openshift_openstack_existing.openstack_etcd_nodes is undefined or
+      openshift_openstack_existing.openstack_etcd_nodes | length == 0
+
+  - name: Create pre-existing etcd group
+    add_host:
+      name: "{{ item }}"
+      groups: etcd
+    with_items: "{{ openshift_openstack_existing.openstack_etcd_nodes }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_etcd_nodes is defined
+    - openshift_openstack_existing.openstack_etcd_nodes | length > 0
+
+  - name: Create new_etcd group
+    add_host:
+      name: "{{ item }}"
+      groups: new_etcd
+    with_items: "{{ groups.openstack_etcd_nodes | default([]) | difference(groups.etcd) }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_etcd_nodes is defined
+    - openshift_openstack_existing.openstack_etcd_nodes | length > 0
+
+  - name: Create a brand new nodes group (no scaling)
+    add_host:
+      name: "{{ item }}"
+      groups: nodes
+    with_items: "{{ groups.openstack_nodes | default([]) }}"
+    changed_when: no
+    when: >
+      openshift_openstack_existing is undefined or
+      openshift_openstack_existing.openstack_nodes is undefined or
+      openshift_openstack_existing.openstack_nodes | length == 0
+
+  - name: Create pre-existing nodes group
+    add_host:
+      name: "{{ item }}"
+      groups: nodes
+    with_items: "{{ openshift_openstack_existing.openstack_nodes }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_nodes is defined
+    - openshift_openstack_existing.openstack_nodes | length > 0
+
+  - name: Create new_nodes group
+    add_host:
+      name: "{{ item }}"
+      groups: new_nodes
+    with_items: "{{ groups.openstack_nodes | default([]) | difference(groups.nodes) }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_nodes is defined
+    - openshift_openstack_existing.openstack_nodes | length > 0
+
+  # TODO(shadower): Do we need to add `new_glusterfs` as well? I saw no
+  # mention in the code.
+  - name: Create glusterfs group
+    add_host:
+      name: "{{ item }}"
+      groups: glusterfs
+    with_items: "{{ groups.openstack_cns_nodes | default([]) }}"
+    changed_when: no

+ 3 - 0
playbooks/openstack/openshift-cluster/install.yml

@@ -8,6 +8,9 @@
 # values here. We do it in the OSEv3 group vars. Do we need to add
 # some logic here?
 
+- name: Evaluate basic OpenStack groups
+  import_playbook: evaluate_groups.yml
+
 - import_playbook: ../../prerequisites.yml
 
 - name: Prepare the Nodes in the cluster for installation

+ 85 - 0
playbooks/openstack/openshift-cluster/master-scaleup.yml

@@ -0,0 +1,85 @@
+---
+- name: Save groups before scaling
+  any_errors_fatal: true
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  become: no
+  tasks:
+  - name: Save the node groups to openshift_openstack_existing
+    set_fact:
+      openshift_openstack_existing: "{{ groups }}"
+
+- name: Create the new OpenStack resources
+  import_playbook: provision_resources.yml
+
+- name: Set the new_nodes and new_masters groups
+  import_playbook: evaluate_groups.yml
+
+- name: Evaluate remaining cluster groups
+  import_playbook: ../../init/evaluate_groups.yml
+
+- name: Wait for the new nodes and gather their facts
+  any_errors_fatal: true
+  hosts: new_nodes:new_masters:new_etcd
+  become: yes
+  # NOTE: The nodes may not be up yet, don't gather facts here.
+  # They'll be collected after `wait_for_connection`.
+  gather_facts: no
+  tasks:
+  - name: Wait for the new nodes to come up
+    wait_for_connection:
+
+  - name: Gather facts for the new nodes
+    setup:
+
+
+- name: Populate the DNS entries for the new nodes
+  any_errors_fatal: true
+  hosts: localhost
+  tasks:
+  - name: Add DNS records for the newly created nodes
+    import_role:
+      name: openshift_openstack
+      tasks_from: populate-dns.yml
+    vars:
+      l_openshift_openstack_dns_update_nodes: "{{ groups.new_nodes }}"
+    when:
+    - openshift_openstack_external_nsupdate_keys is defined
+    - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined
+
+- import_playbook: ../../init/basic_facts.yml
+
+- name: Optionally subscribe the RHEL nodes
+  any_errors_fatal: true
+  hosts: new_nodes:new_masters:new_etcd
+  become: yes
+  gather_facts: yes
+  tasks:
+  - name: Subscribe RHEL instances
+    import_role:
+      name: rhel_subscribe
+    when:
+    - ansible_distribution == "RedHat"
+    - rhsub_user is defined
+    - rhsub_pass is defined
+
+- name: Configure the new OpenStack nodes
+  import_playbook: configure-new-nodes.yml
+
+
+- import_playbook: ../../prerequisites.yml
+  vars:
+    l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    l_base_packages_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    l_init_fact_hosts: "oo_masters:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters']) }}"
+
+- import_playbook: ../../init/version.yml
+  vars:
+    l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+
+- import_playbook: ../../openshift-master/private/scaleup.yml
+
+- name: Show information about the deployed cluster
+  import_playbook: cluster-info.yml

+ 86 - 0
playbooks/openstack/openshift-cluster/node-scaleup.yml

@@ -0,0 +1,86 @@
+---
+- name: Save groups before scaling
+  any_errors_fatal: true
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  become: no
+  tasks:
+  - name: Save the node groups to openshift_openstack_existing
+    set_fact:
+      openshift_openstack_existing: "{{ groups }}"
+
+- name: Create the new OpenStack resources
+  import_playbook: provision_resources.yml
+
+- name: Set the new_nodes groups
+  import_playbook: evaluate_groups.yml
+
+- name: Evaluate remaining cluster groups
+  import_playbook: ../../init/evaluate_groups.yml
+
+- name: Wait for the new nodes and gather their facts
+  any_errors_fatal: true
+  hosts: new_nodes
+  become: yes
+  # NOTE: The nodes may not be up yet, don't gather facts here.
+  # They'll be collected after `wait_for_connection`.
+  gather_facts: no
+  tasks:
+  - name: Wait for the new nodes to come up
+    wait_for_connection:
+
+  - name: Gather facts for the new nodes
+    setup:
+
+
+- name: Populate the DNS entries for the new nodes
+  any_errors_fatal: true
+  hosts: localhost
+  tasks:
+  - name: Add DNS records for the newly created nodes
+    import_role:
+      name: openshift_openstack
+      tasks_from: populate-dns.yml
+    vars:
+      l_openshift_openstack_dns_update_nodes: "{{ groups.new_nodes }}"
+    when:
+    - openshift_openstack_external_nsupdate_keys is defined
+    - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined
+
+- import_playbook: ../../init/basic_facts.yml
+
+- name: Optionally subscribe the RHEL nodes
+  any_errors_fatal: true
+  hosts: new_nodes
+  become: yes
+  gather_facts: yes
+  tasks:
+  - name: Subscribe RHEL instances
+    import_role:
+      name: rhel_subscribe
+    when:
+    - ansible_distribution == "RedHat"
+    - rhsub_user is defined
+    - rhsub_pass is defined
+
+- name: Configure the new OpenStack nodes
+  import_playbook: configure-new-nodes.yml
+
+
+- import_playbook: ../../prerequisites.yml
+  vars:
+    l_scale_up_hosts: "oo_nodes_to_config"
+    l_base_packages_hosts: "oo_nodes_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+
+- import_playbook: ../../init/version.yml
+  vars:
+    l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
+
+- import_playbook: ../../openshift-node/private/bootstrap.yml
+- import_playbook: ../../openshift-node/private/join.yml
+
+- name: Show information about the deployed cluster
+  import_playbook: cluster-info.yml

+ 7 - 4
playbooks/openstack/openshift-cluster/provision.yml

@@ -3,10 +3,11 @@
   import_playbook: provision_resources.yml
 
 
-# NOTE(shadower): Bring in the host groups:
-- name: evaluate groups
-  import_playbook: ../../init/evaluate_groups.yml
+- name: Evaluate OpenStack groups from the dynamic inventory
+  import_playbook: evaluate_groups.yml
 
+- name: Evaluate remaining cluster groups
+  import_playbook: ../../init/evaluate_groups.yml
 
 - name: Wait for the nodes and gather their facts
   any_errors_fatal: true
@@ -29,10 +30,12 @@
   any_errors_fatal: true
   hosts: localhost
   tasks:
-  - name: Populate DNS entries
+  - name: Add the DNS records
     import_role:
       name: openshift_openstack
       tasks_from: populate-dns.yml
+    vars:
+      l_openshift_openstack_dns_update_nodes: "{{ groups.OSEv3 }}"
     when:
     - openshift_openstack_external_nsupdate_keys is defined
     - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined

+ 8 - 2
playbooks/openstack/openshift-cluster/uninstall.yml

@@ -1,5 +1,8 @@
 ---
-- name: evaluate groups
+- name: Evaluate basic OpenStack groups
+  import_playbook: evaluate_groups.yml
+
+- name: Evaluate remaining cluster groups
   import_playbook: ../../init/evaluate_groups.yml
 
 - name: Unsubscribe RHEL instances
@@ -21,7 +24,10 @@
   - name: Clean DNS entries
     import_role:
       name: openshift_openstack
-      tasks_from: clean-dns.yml
+      tasks_from: populate-dns.yml
+    vars:
+      l_openshift_openstack_dns_update_nodes: "{{ groups.OSEv3 }}"
+      l_dns_record_state: absent
     when:
     - openshift_openstack_external_nsupdate_keys is defined
     - openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined

+ 6 - 7
playbooks/openstack/post-install.md

@@ -155,18 +155,17 @@ You can also create your own custom playbook. Here are a few examples:
 
 This example runs against app nodes. The list of options include:
 
-  - cluster_hosts (all hosts: app, infra, masters, dns, lb)
-  - OSEv3 (app, infra, masters)
-  - app
-  - dns
-  - masters
-  - infra_hosts
+  - OSEv3 (all created hosts: app, infra, masters, etcd, glusterfs, lb, nfs)
+  - openstack_nodes (all OpenShift hosts: app, infra, masters, etcd)
+  - openstack_compute_nodes
+  - openstack_master_nodes
+  - openstack_infra_nodes
 
 ### Attach Additional RHN Pools
 
 ```
 ---
-- hosts: cluster_hosts
+- hosts: OSEv3
   tasks:
   - name: Attach additional RHN pool
     become: true

+ 0 - 22
roles/openshift_openstack/tasks/clean-dns.yml

@@ -1,22 +0,0 @@
----
-- name: Generate DNS records
-  include_tasks: generate-dns.yml
-
-- name: "Remove DNS A records"
-  nsupdate:
-    key_name: "{{ item.0.key_name }}"
-    key_secret: "{{ item.0.key_secret }}"
-    key_algorithm: "{{ item.0.key_algorithm }}"
-    server: "{{ item.0.server }}"
-    zone: "{{ item.0.zone }}"
-    record: "{{ item.1.fqdn | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
-    value: "{{ item.1.ip }}"
-    type: "{{ item.1.type }}"
-    state: absent
-  with_subelements:
-    - "{{ openshift_openstack_dns_records | default([]) }}"
-    - entries
-  register: nsupdate_remove_result
-  until: nsupdate_remove_result is succeeded
-  retries: 10
-  delay: 1

+ 2 - 2
roles/openshift_openstack/tasks/create-registry-volume.yml

@@ -6,5 +6,5 @@
     display_description: "Storage for the OpenShift registry"
   register: cinder_registry_volume
   vars:
-    cinder_registry_volume_name: "{{ hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_name }}"
-    cinder_registry_volume_size: "{{ hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_size | regex_replace('[A-Z]i$') }}"
+    cinder_registry_volume_name: "{{ hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_name }}"
+    cinder_registry_volume_size: "{{ hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_size | regex_replace('[A-Z]i$') }}"

+ 0 - 111
roles/openshift_openstack/tasks/generate-dns.yml

@@ -1,111 +0,0 @@
----
-- name: "Generate list of private A records"
-  set_fact:
-    private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix + '.' + openshift_openstack_full_dns_domain, 'ip': hostvars[item]['private_v4'] } ] }}"
-  with_items: "{{ groups['cluster_hosts'] }}"
-  when:
-    - hostvars[item]['private_v4'] is defined
-    - hostvars[item]['private_v4'] is not none
-    - hostvars[item]['private_v4'] | string
-
-- name: "Add wildcard records to the private A records for infrahosts"
-  set_fact:
-    private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': '*.' + hostvars[groups.masters[0]].openshift_master_default_subdomain, 'ip': hostvars[item]['private_v4'] } ] }}"
-  with_items: "{{ groups['infra_hosts'] }}"
-  when:
-    - groups.masters
-    - hostvars[groups.masters[0]].openshift_master_default_subdomain is defined
-    - openshift_openstack_public_router_ip is defined
-    - openshift_openstack_public_router_ip is not none
-    - openshift_openstack_public_router_ip | string
-
-- debug: var=openshift_openstack_private_api_ip
-- name: "Add public master cluster hostname records to the private A records"
-  set_fact:
-    private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[groups.masters[0]].openshift_master_cluster_public_hostname, 'ip': openshift_openstack_private_api_ip } ] }}"
-  when:
-    - groups.masters
-    - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
-    - openshift_openstack_private_api_ip is defined
-    - openshift_openstack_private_api_ip is not none
-    - openshift_openstack_private_api_ip | string
-
-- name: "Set the private DNS server to use the external value (if provided)"
-  set_fact:
-    nsupdate_server_private: "{{ openshift_openstack_external_nsupdate_keys['private']['server'] }}"
-    nsupdate_key_secret_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_secret'] }}"
-    nsupdate_key_algorithm_private: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] }}"
-    nsupdate_private_key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name'] }}"
-  when:
-    - openshift_openstack_external_nsupdate_keys['private'] is defined
-
-
-- name: "Generate the private Add section for DNS"
-  set_fact:
-    private_named_records:
-      - view: "private"
-        zone: "{{ openshift_openstack_nsupdate_zone }}"
-        server: "{{ nsupdate_server_private }}"
-        key_name: "{{ nsupdate_private_key_name }}"
-        key_secret: "{{ nsupdate_key_secret_private }}"
-        key_algorithm: "{{ nsupdate_key_algorithm_private | lower }}"
-        entries: "{{ private_records }}"
-  when:
-    - openshift_openstack_external_nsupdate_keys['private'] is defined
-    - private_records is defined
-
-- name: "Generate list of public A records"
-  set_fact:
-    public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix + '.' + openshift_openstack_full_dns_domain, 'ip': hostvars[item]['public_v4'] } ] }}"
-  with_items: "{{ groups['cluster_hosts'] }}"
-  when:
-    - hostvars[item]['public_v4'] is defined
-    - hostvars[item]['public_v4'] | string
-
-- name: "Add wildcard record to the public A records"
-  set_fact:
-    public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'fqdn': '*.' + hostvars[groups.masters[0]].openshift_master_default_subdomain, 'ip': openshift_openstack_public_router_ip } ] }}"
-  when:
-    - groups.masters
-    - hostvars[groups.masters[0]].openshift_master_default_subdomain is defined
-    - openshift_openstack_public_router_ip is defined
-    - openshift_openstack_public_router_ip is not none
-    - openshift_openstack_public_router_ip | string
-
-- name: "Add the public API entry point record"
-  set_fact:
-    public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[groups.masters[0]].openshift_master_cluster_public_hostname, 'ip': openshift_openstack_public_api_ip } ] }}"
-  when:
-    - groups.masters
-    - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
-    - openshift_openstack_public_api_ip is defined
-    - openshift_openstack_public_api_ip is not none
-    - openshift_openstack_public_api_ip | string
-
-- name: "Set the public DNS server details to use the external value (if provided)"
-  set_fact:
-    nsupdate_server_public: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}"
-    nsupdate_key_secret_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}"
-    nsupdate_key_algorithm_public: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] }}"
-    nsupdate_public_key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name'] }}"
-  when:
-    - openshift_openstack_external_nsupdate_keys['public'] is defined
-
-- name: "Generate the public Add section for DNS"
-  set_fact:
-    public_named_records:
-      - view: "public"
-        zone: "{{ openshift_openstack_nsupdate_zone }}"
-        server: "{{ nsupdate_server_public }}"
-        key_name: "{{ nsupdate_public_key_name }}"
-        key_secret: "{{ nsupdate_key_secret_public }}"
-        key_algorithm: "{{ nsupdate_key_algorithm_public | lower }}"
-        entries: "{{ public_records }}"
-  when:
-    - openshift_openstack_external_nsupdate_keys['public'] is defined
-    - public_records is defined
-
-
-- name: "Generate the final openshift_openstack_dns_records"
-  set_fact:
-    openshift_openstack_dns_records: "{{ private_named_records|default([]) + public_named_records|default([]) }}"

+ 84 - 15
roles/openshift_openstack/tasks/populate-dns.yml

@@ -1,22 +1,91 @@
 ---
-- name: Generate DNS records
-  include_tasks: generate-dns.yml
+- name: Add private node records
+  nsupdate:
+    key_name: "{{ openshift_openstack_external_nsupdate_keys['private']['key_name'] }}"
+    key_secret: "{{ openshift_openstack_external_nsupdate_keys['private']['key_secret'] }}"
+    key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] | lower }}"
+    server: "{{ openshift_openstack_external_nsupdate_keys['private']['server'] }}"
+    zone: "{{ openshift_openstack_nsupdate_zone }}"
+    record: "{{ hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix + '.' + openshift_openstack_full_dns_domain | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    value: "{{ hostvars[item]['private_v4'] }}"
+    type: "A"
+    state: "{{ l_dns_record_state | default('present') }}"
+  with_items: "{{ l_openshift_openstack_dns_update_nodes }}"
+  register: nsupdate_add_result
+  until: nsupdate_add_result is succeeded
+  retries: 10
+  when:
+    - openshift_openstack_external_nsupdate_keys['private'] is defined
+    - hostvars[item]['private_v4'] is defined
+    - hostvars[item]['private_v4'] is not none
+    - hostvars[item]['private_v4'] | string
+  delay: 1
+
+
+- name: Add public node records
+  nsupdate:
+    key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name'] }}"
+    key_secret: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}"
+    key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] | lower }}"
+    server: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}"
+    zone: "{{ openshift_openstack_nsupdate_zone }}"
+    record: "{{ hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix + '.' + openshift_openstack_full_dns_domain | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    value: "{{ hostvars[item]['public_v4'] }}"
+    type: "A"
+    state: "{{ l_dns_record_state | default('present') }}"
+  with_items: "{{ l_openshift_openstack_dns_update_nodes }}"
+  register: nsupdate_add_result
+  until: nsupdate_add_result is succeeded
+  retries: 10
+  when:
+    - openshift_openstack_external_nsupdate_keys['public'] is defined
+    - hostvars[item]['public_v4'] is defined
+    - hostvars[item]['public_v4'] is not none
+    - hostvars[item]['public_v4'] | string
+  delay: 1
+
+- name: Add public wildcard record
+  nsupdate:
+    key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name'] }}"
+    key_secret: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}"
+    key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] | lower }}"
+    server: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}"
+    zone: "{{ openshift_openstack_nsupdate_zone }}"
+    record: "{{ '*.' + hostvars[groups.masters[0]].openshift_master_default_subdomain | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    value: "{{ openshift_openstack_public_router_ip }}"
+    type: "A"
+    state: "{{ l_dns_record_state | default('present') }}"
+  register: nsupdate_add_result
+  until: nsupdate_add_result is succeeded
+  retries: 10
+  delay: 1
+  when:
+    - openshift_openstack_external_nsupdate_keys['public'] is defined
+    - groups.masters
+    - hostvars[groups.masters[0]].openshift_master_default_subdomain is defined
+    - openshift_openstack_public_router_ip is defined
+    - openshift_openstack_public_router_ip is not none
+    - openshift_openstack_public_router_ip | string
+
 
-- name: "Add DNS A records"
+- name: Add public API record
   nsupdate:
-    key_name: "{{ item.0.key_name }}"
-    key_secret: "{{ item.0.key_secret }}"
-    key_algorithm: "{{ item.0.key_algorithm }}"
-    server: "{{ item.0.server }}"
-    zone: "{{ item.0.zone }}"
-    record: "{{ item.1.fqdn | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
-    value: "{{ item.1.ip }}"
-    type: "{{ item.1.type }}"
-    state: present
-  with_subelements:
-    - "{{ openshift_openstack_dns_records | default([]) }}"
-    - entries
+    key_name: "{{ openshift_openstack_external_nsupdate_keys['public']['key_name'] }}"
+    key_secret: "{{ openshift_openstack_external_nsupdate_keys['public']['key_secret'] }}"
+    key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] | lower }}"
+    server: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}"
+    zone: "{{ openshift_openstack_nsupdate_zone }}"
+    record: "{{ hostvars[groups.masters[0]].openshift_master_cluster_public_hostname | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    value: "{{ openshift_openstack_public_api_ip }}"
+    type: "A"
+    state: "{{ l_dns_record_state | default('present') }}"
   register: nsupdate_add_result
   until: nsupdate_add_result is succeeded
   retries: 10
   delay: 1
+  when:
+    - groups.masters
+    - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+    - openshift_openstack_public_api_ip is defined
+    - openshift_openstack_public_api_ip is not none
+    - openshift_openstack_public_api_ip | string

+ 5 - 5
roles/openshift_openstack/tasks/provision.yml

@@ -102,8 +102,8 @@
 - name: Create the Cinder volume for OpenShift Registry
   include_tasks: create-registry-volume.yml
   when:
-  - groups.infra_hosts is defined
-  - groups.infra_hosts.0 is defined
-  - hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_name is defined
-  - hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_size is defined
-  - hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_openstack_volumeID is not defined
+  - groups.openstack_infra_nodes is defined
+  - groups.openstack_infra_nodes.0 is defined
+  - hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_name is defined
+  - hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_size is defined
+  - hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_openstack_volumeID is not defined

+ 1 - 1
roles/openshift_openstack/tasks/unprovision.yml

@@ -23,7 +23,7 @@
 - name: Remove trunk subports
   os_subports_deletion:
     trunk_name: "{{ item }}"
-  with_items: "{{ groups['cluster_hosts'] }}"
+  with_items: "{{ groups['OSEv3'] }}"
   when:
     - openshift_use_kuryr | default(false) | bool