
Merge pull request #10895 from mgugino-upstream-stage/40-scaleup

40 scaleup
Committed by OpenShift Merge Robot (commit 5d877feea5)

+ 0 - 18
playbooks/deploy_cluster_40.yml

@@ -7,24 +7,6 @@
     l_install_base_packages: True
     l_repo_hosts: "nodes"
 
-- name: Read in openshift-install
-  hosts: masters[0]
-  tasks:
-  - slurp:
-      src: "{{ openshift_install_config_path }}"
-    register: openshift_install_config_reg
-    delegate_to: localhost
-    run_once: True
-  - set_fact:
-      openshift_install_config: "{{ openshift_install_config_reg['content'] | b64decode | from_yaml }}"
-
-# We might need to access these values on each host later.
-- name: set_fact openshift_install_config across all nodes
-  hosts: nodes
-  tasks:
-  - set_fact:
-      openshift_install_config: "{{ hostvars[groups['masters'][0]].openshift_install_config }}"
-
 # TODO(michaelgugino): break up the rest of this file into reusable chunks.
 - name: Install nodes
   hosts: nodes
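Note: the two plays removed here are not dropped; they move into playbooks/init/basic_facts.yml in the next hunk, with the second play's hosts list parameterized via l_init_fact_hosts, so any playbook that imports the init plays (including the new scaleup playbook below) loads the install-config without duplicating these tasks.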

+ 18 - 0
playbooks/init/basic_facts.yml

@@ -29,3 +29,21 @@
     when:
     - openshift_deployment_type is undefined
     - deployment_type is defined
+
+- name: Read in openshift-install
+  hosts: masters[0]
+  tasks:
+  - slurp:
+      src: "{{ openshift_install_config_path }}"
+    register: openshift_install_config_reg
+    delegate_to: localhost
+    run_once: True
+  - set_fact:
+      openshift_install_config: "{{ openshift_install_config_reg['content'] | b64decode | from_yaml }}"
+
+# We might need to access these values on each host later.
+- name: set_fact openshift_install_config across all nodes
+  hosts: "{{ l_init_fact_hosts | default('nodes') }}"
+  tasks:
+  - set_fact:
+      openshift_install_config: "{{ hostvars[groups['masters'][0]].openshift_install_config }}"
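For context, these plays only consume a couple of keys from the file at openshift_install_config_path. A minimal sketch of such an install-config, assuming the values from test/libvirt/installrc further down (metadata.name and baseDomain are what scaleup.yml uses to build the bootstrap endpoint URL):

    # Minimal sketch; values assumed from test/libvirt/installrc below
    baseDomain: tt.testing
    metadata:
      name: byo-dev
    machines:
    - name: worker
      replicas: 0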

+ 36 - 6
playbooks/openshift-node/scaleup.yml

@@ -24,19 +24,49 @@
 # if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
 # g_new_node_hosts via evaluate_groups.yml
 
-- import_playbook: ../prerequisites.yml
+- name: run the init
+  import_playbook: ../init/main.yml
   vars:
-    l_scale_up_hosts: "oo_nodes_to_config"
-    l_base_packages_hosts: "oo_nodes_to_config"
-    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
-    l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+    l_init_fact_hosts: "masters:new_nodes"
+    l_openshift_version_set_hosts: "new_nodes"
+    l_install_base_packages: True
+    l_repo_hosts: "new_nodes"
 
 - name: install nodes
-  hosts: oo_nodes_to_config
+  hosts: new_nodes
+  vars:
+    openshift_bootstrap_endpoint: "https://{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}:49500/config/worker"
+  roles:
+  - role: container_runtime
   tasks:
   - import_role:
+      name: container_runtime
+      tasks_from: docker_storage_setup_overlay.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: extra_storage_setup.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: package_crio.yml
+  - import_role:
       name: openshift_node40
       tasks_from: install.yml
+
+- name: Start workers
+  hosts: new_nodes
+  vars:
+    openshift_bootstrap_endpoint: "https://{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}:49500/config/worker"
+  tasks:
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
   - import_role:
       name: openshift_node40
       tasks_from: config.yml
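The playbook above keys off two inventory groups, masters and new_nodes. A minimal sketch of a matching inventory, assuming Ansible's YAML inventory format (the test harness's inventory.txt may well be INI instead); the master address is illustrative, the worker address matches OCP_WORKER_IPS below:

    all:
      children:
        masters:
          hosts:
            192.168.126.11:   # illustrative master address
        new_nodes:
          hosts:
            192.168.126.21:   # matches OCP_WORKER_IPS in installrc

The uri wait allows up to 60 retries at 10-second intervals, so the bootstrap endpoint has roughly ten minutes to come up before config.yml runs.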

+ 1 - 1
test/libvirt/install-config.yml.template

@@ -4,7 +4,7 @@ machines:
 - name: master
   replicas: ${OCP_MASTERS}
 - name: worker
-  replicas: ${OCP_WORKERS}
+  replicas: 0
 metadata:
   name: ${OCP_CLUSTER_NAME}
 networking:
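Since workers are now created by Terraform and joined through the scaleup playbook, the installer's worker machine pool is pinned to zero here; OCP_WORKERS instead feeds the Terraform worker_count via terraform.tfvars.template below.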

+ 3 - 1
test/libvirt/installrc

@@ -3,7 +3,7 @@
 export OCP_BASE_DOMAIN="tt.testing"
 export OCP_CLUSTER_ID=$(uuidgen --random)
 export OCP_MASTERS=1
-export OCP_WORKERS=0
+export OCP_WORKERS=1
 export OCP_CLUSTER_NAME="byo-dev"
 export OCP_PULL_SECRET=$(cat ~/try.openshift.com.json)
 export OCP_SSH_PUB_KEY=$(cat ~/.ssh/id_rsa.pub)
@@ -18,4 +18,6 @@ export OCP_LIBVIRT_URI='qemu+tcp://192.168.124.1/system'
 export OCP_INSTALL_CONFIG_PATH="$PWD/install-config-ansible.yml"
 export OCP_IGNITION_PATH="$PWD/bootstrap.ign"
 
+export OCP_WORKER_IPS=\"192.168.126.21\"
+
 export ANSIBLE_HOST_KEY_CHECKING=False
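The escaped quotes in OCP_WORKER_IPS are deliberate: the value is substituted verbatim into the JSON list in terraform.tfvars.template, so it has to carry its own quoting. For two workers it would presumably become \"192.168.126.21\", \"192.168.126.22\" (the second address is hypothetical), with OCP_WORKERS bumped to match.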

+ 7 - 0
test/libvirt/node_scaleup.sh

@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+APB3="`which python3` `which ansible-playbook`"
+WORKDIR=$PWD
+
+cd ../..
+$APB3 -vvv -i $WORKDIR/inventory.txt playbooks/openshift-node/scaleup.yml
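Usage note: the script assumes it is launched from test/libvirt, since it cds two levels up to the repository root before invoking ansible-playbook, and that inventory.txt (with the new_nodes group populated) already sits in that working directory; set -e aborts on the first failure.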

+ 10 - 0
test/libvirt/terraform/config.tf

@@ -12,6 +12,16 @@ This applies only to cloud platforms.
 EOF
 }
 
+variable "worker_count" {
+  type    = "string"
+  default = "0"
+
+  description = <<EOF
+The number of worker nodes to be created.
+This applies only to cloud platforms.
+EOF
+}
+
 variable "base_domain" {
   type = "string"
 

+ 31 - 0
test/libvirt/terraform/main.tf

@@ -25,6 +25,12 @@ resource "libvirt_volume" "master" {
   base_volume_id = "${module.volume.coreos_base_volume_id}"
 }
 
+resource "libvirt_volume" "worker" {
+  count          = "${var.worker_count}"
+  name           = "${var.cluster_name}-worker-${count.index}"
+  base_volume_id = "${module.volume.coreos_base_volume_id}"
+}
+
 resource "libvirt_network" "net" {
   name = "${var.cluster_name}"
 
@@ -91,6 +97,31 @@ resource "libvirt_domain" "master" {
   }
 }
 
+resource "libvirt_domain" "worker" {
+  count = "${var.worker_count}"
+
+  name = "${var.cluster_name}-worker-${count.index}"
+
+  memory = "${var.libvirt_worker_memory}"
+  vcpu   = "${var.libvirt_worker_vcpu}"
+
+  cloudinit = "${libvirt_cloudinit_disk.commoninit.id}"
+  disk {
+    volume_id = "${element(libvirt_volume.worker.*.id, count.index)}"
+  }
+
+  console {
+    type        = "pty"
+    target_port = 0
+  }
+
+  network_interface {
+    network_id = "${libvirt_network.net.id}"
+    hostname   = "${var.cluster_name}-worker-${count.index}"
+    addresses  = ["${var.libvirt_worker_ips[count.index]}"]
+  }
+}
+
 data "libvirt_network_dns_host_template" "bootstrap" {
   count    = "${var.bootstrap_dns ? 1 : 0}"
   ip       = "${var.libvirt_bootstrap_ip}"

+ 4 - 0
test/libvirt/terraform/terraform.tfvars.template

@@ -3,6 +3,7 @@
   "cluster_name": "${OCP_CLUSTER_NAME}",
   "base_domain": "${OCP_BASE_DOMAIN}",
   "master_count": ${OCP_MASTERS},
+  "worker_count": ${OCP_WORKERS},
 
 "libvirt_uri": "${OCP_LIBVIRT_URI}",
 "os_image": "${OCP_IMAGE}",
@@ -11,6 +12,9 @@
 "libvirt_master_ips": [
   ${OCP_MASTER_IPS}
 ],
+"libvirt_worker_ips": [
+  ${OCP_WORKER_IPS}
+],
 "libvirt_bootstrap_ip": "${OCP_BOOTSTRAP_IP}",
 "ssh_key": "${OCP_SSH_PUB_KEY}"
 }

+ 21 - 0
test/libvirt/terraform/variables-libvirt.tf

@@ -53,3 +53,24 @@ variable "ssh_key" {
   type        = "string"
   description = "ssh public key"
 }
+
+variable "libvirt_worker_ips" {
+  type        = "list"
+  description = "the list of desired master ips. Must match worker_count"
+}
+
+# It's definitely recommended to bump this if you can.
+variable "libvirt_worker_memory" {
+  type        = "string"
+  description = "RAM in MiB allocated to masters"
+  default     = "2048"
+}
+
+# At some point this one is likely to default to the number
+# of physical cores you have.  See also
+# https://pagure.io/standard-test-roles/pull-request/223
+variable "libvirt_worker_vcpu" {
+  type        = "string"
+  description = "CPUs allocated to workers"
+  default     = "1"
+}
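Both sizing knobs can be overridden per run, e.g. by adding "libvirt_worker_memory": "4096" to the generated terraform.tfvars (a hypothetical override, following the same JSON shape the template above uses), without touching the defaults here.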