
Add the OpenStack master scaleup playbook

This adds a simple playbook that will provision the new master or etcd
nodes and perform the necessary scale-up tasks.

All one needs to do is update the master node count in the inventory
and then run playbooks/openstack/openshift-cluster/master-scaleup.yml.

It's of course still possible to scale up nodes manually.
Tomas Sedovic · 6 years ago · commit de98c9cfb5

+ 37 - 0
playbooks/openstack/configuration.md

@@ -1009,6 +1009,43 @@ Where `<count>` is the number of the pods you want (i.e. the number of your
 infra nodes).
 
 
+### Scaling the Master Nodes
+
+Adding master nodes is similar to adding compute/infra nodes, but we need to
+run a different playbook at the end.
+
+You must have a fully working OpenShift cluster before you start scaling.
+
+#### 1. Adding Extra Master Nodes
+
+Edit your `inventory/group_vars/all.yml` and set the new master node total in
+`openshift_openstack_num_masters`.
+
+For example, if you started with a single master node and you want to add two
+more (for a grand total of three), you should set:
+
+    openshift_openstack_num_masters: 3
+
+#### 2. Scaling the Cluster
+
+Then run the `master-scaleup.yml` playbook:
+
+```
+$ ansible-playbook --user openshift \
+  -i openshift-ansible/playbooks/openstack/inventory.py \
+  -i inventory \
+  openshift-ansible/playbooks/openstack/openshift-cluster/master-scaleup.yml
+```
+
+This will create the new OpenStack nodes; optionally create the DNS records
+and subscribe them to RHN; configure the `new_masters`, `new_nodes` and
+`new_etcd` groups; and run the OpenShift master scaleup tasks.
+
+When the playbook finishes, you should have new master nodes up and running.
+
+Run `oc get nodes` to verify.
+
+
 ## Deploying At Scale
 
 By default, heat stack outputs are resolved.  This may cause

+ 11 - 21
playbooks/openstack/inventory.py

@@ -44,25 +44,22 @@ def base_openshift_inventory(cluster_hosts):
     cns = [server.name for server in cluster_hosts
            if server.metadata['host-type'] == 'cns']
 
-    nodes = list(set(masters + infra_hosts + app + cns))
-
-    dns = [server.name for server in cluster_hosts
-           if server.metadata['host-type'] == 'dns']
-
     load_balancers = [server.name for server in cluster_hosts
                       if server.metadata['host-type'] == 'lb']
 
-    osev3 = list(set(nodes + etcd + load_balancers))
+    # NOTE: everything that should go to the `[nodes]` group:
+    nodes = list(set(masters + etcd + infra_hosts + app + cns))
+
+    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
+    osev3 = list(set(nodes + load_balancers))
 
-    inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
     inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
-    inventory['masters'] = {'hosts': masters}
-    inventory['etcd'] = {'hosts': etcd}
-    inventory['nodes'] = {'hosts': nodes}
-    inventory['infra_hosts'] = {'hosts': infra_hosts}
-    inventory['app'] = {'hosts': app}
-    inventory['glusterfs'] = {'hosts': cns}
-    inventory['dns'] = {'hosts': dns}
+    inventory['openstack_nodes'] = {'hosts': nodes}
+    inventory['openstack_master_nodes'] = {'hosts': masters}
+    inventory['openstack_etcd_nodes'] = {'hosts': etcd}
+    inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
+    inventory['openstack_compute_nodes'] = {'hosts': app}
+    inventory['openstack_cns_nodes'] = {'hosts': cns}
     inventory['lb'] = {'hosts': load_balancers}
     inventory['localhost'] = {'ansible_connection': 'local'}
 
@@ -132,13 +129,6 @@ def build_inventory():
 
     inventory = base_openshift_inventory(cluster_hosts)
 
-    for server in cluster_hosts:
-        if 'group' in server.metadata:
-            group = server.metadata.get('group')
-            if group not in inventory:
-                inventory[group] = {'hosts': []}
-            inventory[group]['hosts'].append(server.name)
-
     inventory['_meta'] = {'hostvars': {}}
 
     # Some clouds don't have Cinder. That's okay:
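
For reference, the dynamic inventory now publishes the cluster layout under `openstack_*` keys, so that `evaluate_groups.yml` below can derive the actual `masters`/`new_masters`-style groups itself. A sketch of the inventory the script might now emit, rendered as YAML with invented host names:

```
# Hypothetical output; host names are illustrative, not from the patch.
# `openstack_nodes` is the union of masters, etcd, infra, app and cns;
# `OSEv3` additionally pulls in the load balancers.
OSEv3:
  hosts: [master-0, master-1, infra-0, app-0, lb-0]
  vars: {}
openstack_nodes:
  hosts: [master-0, master-1, infra-0, app-0]
openstack_master_nodes:
  hosts: [master-0, master-1]
openstack_etcd_nodes:
  hosts: []
openstack_infra_nodes:
  hosts: [infra-0]
openstack_compute_nodes:
  hosts: [app-0]
openstack_cns_nodes:
  hosts: []
lb:
  hosts: [lb-0]
localhost:
  ansible_connection: local
```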

+ 1 - 1
playbooks/openstack/openshift-cluster/configure-new-nodes.yml

@@ -3,7 +3,7 @@
 # that it runs on the new nodes only.
 - name: Prepare the New Nodes in the cluster for installation
   any_errors_fatal: true
-  hosts: new_nodes:new_masters
+  hosts: new_nodes:new_masters:new_etcd
   become: yes
   gather_facts: yes
   tasks:
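
The `hosts:` pattern above relies on `:` acting as a union in Ansible host patterns, so the play targets any host that appears in at least one of the three groups. A minimal illustration (group contents hypothetical):

```
# `:` unions groups; `:&` would intersect and `:!` exclude.
# With new_nodes=[app-2], new_masters=[master-1] and new_etcd=[master-1],
# this play runs on app-2 and master-1, exactly once each.
- hosts: new_nodes:new_masters:new_etcd
  gather_facts: no
  tasks:
  - debug:
      msg: "{{ inventory_hostname }} is a new host"
```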

+ 136 - 0
playbooks/openstack/openshift-cluster/evaluate_groups.yml

@@ -0,0 +1,136 @@
+---
+# NOTE: to support scaling, the `masters` and `new_masters` groups (and,
+# analogously, `nodes` and `etcd`) must be mutually exclusive.
+#
+# Since Ansible cannot remove a node from a group, the dynamic inventory
+# can't e.g. tag all master nodes as part of the `masters` group and then
+# add the new ones to `new_masters`. Creating new hosts means refreshing
+# the inventory, which would bring all the nodes (old and new) into the
+# `masters` group and cause the scaleup playbook to fail.
+#
+# Since the playbooks can't pass data to the dynamic inventory either,
+# this new/old separation cannot happen there.
+#
+# So the inventory sets e.g. `openstack_master_nodes` and this playbook
+# configures the actual source groups such as `masters`, `new_masters`,
+# `nodes`, `etcd`, etc.
+- name: Evaluate the OpenStack groups
+  any_errors_fatal: true
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  become: no
+  tasks:
+  # This will happen when we're deploying a cluster from scratch. Add all
+  # master nodes to the `masters` group.
+  - name: Create a brand new masters group (no scaling)
+    add_host:
+      name: "{{ item }}"
+      groups: masters
+    with_items: "{{ groups.openstack_master_nodes | default([]) }}"
+    changed_when: no
+    when: >
+      openshift_openstack_existing is undefined or
+      openshift_openstack_existing.openstack_master_nodes is undefined or
+      openshift_openstack_existing.openstack_master_nodes | length == 0
+
+  # This will happen when we are scaling an existing cluster. Add the
+  # pre-existing master nodes to the `masters` group.
+  - name: Create pre-existing masters group
+    add_host:
+      name: "{{ item }}"
+      groups: masters
+    with_items: "{{ openshift_openstack_existing.openstack_master_nodes }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_master_nodes is defined
+    - openshift_openstack_existing.openstack_master_nodes | length > 0
+
+  # This will happen when we are scaling an existing cluster. Add the
+  # newly-created nodes to the `new_masters` group.
+  - name: Create new_masters group
+    add_host:
+      name: "{{ item }}"
+      groups: new_masters
+    with_items: "{{ groups.openstack_master_nodes | default([]) | difference(groups.masters) }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_master_nodes is defined
+    - openshift_openstack_existing.openstack_master_nodes | length > 0
+
+  - name: Create a brand new etcd group (no scaling)
+    add_host:
+      name: "{{ item }}"
+      groups: etcd
+    with_items: "{{ groups.openstack_etcd_nodes | default([]) }}"
+    changed_when: no
+    when: >
+      openshift_openstack_existing is undefined or
+      openshift_openstack_existing.openstack_etcd_nodes is undefined or
+      openshift_openstack_existing.openstack_etcd_nodes | length == 0
+
+  - name: Create pre-existing etcd group
+    add_host:
+      name: "{{ item }}"
+      groups: etcd
+    with_items: "{{ openshift_openstack_existing.openstack_etcd_nodes }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_etcd_nodes is defined
+    - openshift_openstack_existing.openstack_etcd_nodes | length > 0
+
+  - name: Create new_etcd group
+    add_host:
+      name: "{{ item }}"
+      groups: new_etcd
+    with_items: "{{ groups.openstack_etcd_nodes | default([]) | difference(groups.etcd) }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_etcd_nodes is defined
+    - openshift_openstack_existing.openstack_etcd_nodes | length > 0
+
+  - name: Create a brand new nodes group (no scaling)
+    add_host:
+      name: "{{ item }}"
+      groups: nodes
+    with_items: "{{ groups.openstack_nodes | default([]) }}"
+    changed_when: no
+    when: >
+      openshift_openstack_existing is undefined or
+      openshift_openstack_existing.openstack_nodes is undefined or
+      openshift_openstack_existing.openstack_nodes | length == 0
+
+  - name: Create pre-existing nodes group
+    add_host:
+      name: "{{ item }}"
+      groups: nodes
+    with_items: "{{ openshift_openstack_existing.openstack_nodes }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_nodes is defined
+    - openshift_openstack_existing.openstack_nodes | length > 0
+
+  - name: Create new_nodes group
+    add_host:
+      name: "{{ item }}"
+      groups: new_nodes
+    with_items: "{{ groups.openstack_nodes | default([]) | difference(groups.nodes) }}"
+    changed_when: no
+    when:
+    - openshift_openstack_existing is defined
+    - openshift_openstack_existing.openstack_nodes is defined
+    - openshift_openstack_existing.openstack_nodes | length > 0
+
+  # TODO(shadower): Do we need to add `new_glusterfs` as well? I saw no
+  # mention in the code.
+  - name: Create glusterfs group
+    add_host:
+      name: "{{ item }}"
+      groups: glusterfs
+    with_items: "{{ groups.openstack_cns_nodes | default([]) }}"
+    changed_when: no

+ 3 - 0
playbooks/openstack/openshift-cluster/install.yml

@@ -8,6 +8,9 @@
 # values here. We do it in the OSEv3 group vars. Do we need to add
 # some logic here?
 
+- name: Evaluate basic OpenStack groups
+  import_playbook: evaluate_groups.yml
+
 - import_playbook: ../../prerequisites.yml
 
 - name: Prepare the Nodes in the cluster for installation

+ 36 - 0
playbooks/openstack/openshift-cluster/master-scaleup.yml

@@ -0,0 +1,36 @@
+---
+- name: Save groups before scaling
+  any_errors_fatal: true
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  become: no
+  tasks:
+  - name: Save the node groups to openshift_openstack_existing
+    set_fact:
+      openshift_openstack_existing: "{{ groups }}"
+
+- name: Create the new OpenStack resources
+  import_playbook: provision.yml
+
+- name: Set the new_nodes and new_masters groups
+  import_playbook: evaluate_groups.yml
+
+# TODO: run the DNS update and RHEL registration only on new_nodes here.
+# That would let us call `provision_resources.yml` above instead.
+
+- name: Configure the new OpenStack nodes
+  import_playbook: configure-new-nodes.yml
+
+- import_playbook: ../../prerequisites.yml
+  vars:
+    l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    l_base_packages_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    l_init_fact_hosts: "oo_masters:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters']) }}"
+
+- import_playbook: ../../init/version.yml
+  vars:
+    l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+
+- import_playbook: ../../openshift-master/private/scaleup.yml
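
The first play works because `set_fact` copies the `groups` magic variable before `provision.yml` refreshes the inventory, and facts set on `localhost` persist across the imported playbooks. A self-contained sketch of the same snapshot-then-diff pattern (host and group names hypothetical):

```
---
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - name: Snapshot the groups as they are right now
    set_fact:
      saved: "{{ groups }}"
  - name: Simulate the inventory growing (stands in for provisioning)
    add_host:
      name: master-1
      groups: openstack_master_nodes

- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
  - name: The difference is exactly the newly added hosts
    debug:
      msg: "{{ groups.openstack_master_nodes | difference(saved.openstack_master_nodes | default([])) }}"
```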

+ 4 - 3
playbooks/openstack/openshift-cluster/provision.yml

@@ -3,10 +3,11 @@
   import_playbook: provision_resources.yml
 
 
-# NOTE(shadower): Bring in the host groups:
-- name: evaluate groups
-  import_playbook: ../../init/evaluate_groups.yml
+- name: Evaluate OpenStack groups from the dynamic inventory
+  import_playbook: evaluate_groups.yml
 
+- name: Evaluate remaining cluster groups
+  import_playbook: ../../init/evaluate_groups.yml
 
 - name: Wait for the nodes and gather their facts
   any_errors_fatal: true

+ 4 - 1
playbooks/openstack/openshift-cluster/uninstall.yml

@@ -1,5 +1,8 @@
 ---
-- name: evaluate groups
+- name: Evaluate basic OpenStack groups
+  import_playbook: evaluate_groups.yml
+
+- name: Evaluate remaining cluster groups
   import_playbook: ../../init/evaluate_groups.yml
 
 - name: Unsubscribe RHEL instances

+ 6 - 7
playbooks/openstack/post-install.md

@@ -155,18 +155,17 @@ You can also create your own custom playbook. Here are a few examples:
 
This example runs against app nodes. The list of options includes:
 
-  - cluster_hosts (all hosts: app, infra, masters, dns, lb)
-  - OSEv3 (app, infra, masters)
-  - app
-  - dns
-  - masters
-  - infra_hosts
+  - OSEv3 (all created hosts: app, infra, masters, etcd, glusterfs, lb, nfs)
+  - openstack_nodes (all OpenShift hosts: app, infra, masters, etcd)
+  - openstack_compute_nodes
+  - openstack_master_nodes
+  - openstack_infra_nodes
 
 ### Attach Additional RHN Pools
 
 ```
 ---
-- hosts: cluster_hosts
+- hosts: OSEv3
   tasks:
   - name: Attach additional RHN pool
     become: true
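
Another custom-playbook sketch built on the renamed groups; the task itself is just an illustration:

```
---
# Hypothetical post-install playbook: runs only on the infra nodes.
- hosts: openstack_infra_nodes
  become: true
  tasks:
  - name: Install a debugging tool on the infra nodes
    package:
      name: iotop
      state: present
```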

+ 2 - 2
roles/openshift_openstack/tasks/create-registry-volume.yml

@@ -6,5 +6,5 @@
     display_description: "Storage for the OpenShift registry"
   register: cinder_registry_volume
   vars:
-    cinder_registry_volume_name: "{{ hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_name }}"
-    cinder_registry_volume_size: "{{ hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_size | regex_replace('[A-Z]i$') }}"
+    cinder_registry_volume_name: "{{ hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_name }}"
+    cinder_registry_volume_size: "{{ hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_size | regex_replace('[A-Z]i$') }}"
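
The single-argument `regex_replace` deletes whatever the pattern matches, so a size such as `20Gi` is reduced to the bare number of gigabytes the Cinder volume size expects. For instance:

```
# "20Gi" -> "20" (a plain "20" is untouched: the pattern only matches a suffix)
- debug:
    msg: "{{ '20Gi' | regex_replace('[A-Z]i$') }}"
```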

+ 3 - 3
roles/openshift_openstack/tasks/generate-dns.yml

@@ -2,7 +2,7 @@
 - name: "Generate list of private A records"
   set_fact:
     private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix + '.' + openshift_openstack_full_dns_domain, 'ip': hostvars[item]['private_v4'] } ] }}"
-  with_items: "{{ groups['cluster_hosts'] }}"
+  with_items: "{{ groups['OSEv3'] }}"
   when:
     - hostvars[item]['private_v4'] is defined
     - hostvars[item]['private_v4'] is not none
@@ -11,7 +11,7 @@
 - name: "Add wildcard records to the private A records for infrahosts"
   set_fact:
     private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': '*.' + hostvars[groups.masters[0]].openshift_master_default_subdomain, 'ip': hostvars[item]['private_v4'] } ] }}"
-  with_items: "{{ groups['infra_hosts'] }}"
+  with_items: "{{ groups['openstack_infra_nodes'] }}"
   when:
     - groups.masters
     - hostvars[groups.masters[0]].openshift_master_default_subdomain is defined
@@ -57,7 +57,7 @@
 - name: "Generate list of public A records"
   set_fact:
     public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix + '.' + openshift_openstack_full_dns_domain, 'ip': hostvars[item]['public_v4'] } ] }}"
-  with_items: "{{ groups['cluster_hosts'] }}"
+  with_items: "{{ groups['OSEv3'] }}"
   when:
     - hostvars[item]['public_v4'] is defined
     - hostvars[item]['public_v4'] | string
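
Each record accumulated by these tasks is a plain dict; a generated entry ends up shaped roughly like this (FQDN and address invented for illustration):

```
- type: A
  fqdn: master-0.openshift.example.com
  ip: 192.168.1.10
```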

+ 5 - 5
roles/openshift_openstack/tasks/provision.yml

@@ -102,8 +102,8 @@
 - name: Create the Cinder volume for OpenShift Registry
   include_tasks: create-registry-volume.yml
   when:
-  - groups.infra_hosts is defined
-  - groups.infra_hosts.0 is defined
-  - hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_name is defined
-  - hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_volume_size is defined
-  - hostvars[groups.infra_hosts.0].openshift_hosted_registry_storage_openstack_volumeID is not defined
+  - groups.openstack_infra_nodes is defined
+  - groups.openstack_infra_nodes.0 is defined
+  - hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_name is defined
+  - hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_volume_size is defined
+  - hostvars[groups.openstack_infra_nodes.0].openshift_hosted_registry_storage_openstack_volumeID is not defined

+ 1 - 1
roles/openshift_openstack/tasks/unprovision.yml

@@ -23,7 +23,7 @@
 - name: Remove trunk subports
   os_subports_deletion:
     trunk_name: "{{ item }}"
-  with_items: "{{ groups['cluster_hosts'] }}"
+  with_items: "{{ groups['OSEv3'] }}"
   when:
     - openshift_use_kuryr | default(false) | bool