فهرست منبع

Merge pull request #10880 from mgugino-upstream-stage/install-config-as-vars

Install config as vars
Scott Dodson 6 سال پیش
والد
کامیت
24fe8809c5
35 فایل‌های تغییر یافته به همراه 671 افزوده شده و 10 حذف شده
  1. 10 0
      .gitignore
  2. 23 7
      playbooks/deploy_cluster_40.yml
  3. 1 1
      roles/openshift_gcp/tasks/setup_scale_group_facts.yml
  4. 2 2
      roles/openshift_node40/tasks/config.yml
  5. 3 0
      roles/openshift_node40/tasks/install.yml
  6. 26 0
      test/libvirt/README.md
  7. 5 0
      test/libvirt/cleanup.sh
  8. 10 0
      test/libvirt/deploy.sh
  9. 9 0
      test/libvirt/generate_assets.sh
  10. 22 0
      test/libvirt/generate_inventory.sh
  11. 10 0
      test/libvirt/group_vars/nodes.yml
  12. 28 0
      test/libvirt/install-config.yml.template
  13. 21 0
      test/libvirt/installrc
  14. 24 0
      test/libvirt/inv.txt.template
  15. 8 0
      test/libvirt/playbooks/files/openshift-local.repo
  16. 14 0
      test/libvirt/playbooks/localrepo.yml
  17. 15 0
      test/libvirt/playbooks/prep.yml
  18. 9 0
      test/libvirt/playbooks/rhel_prep.yml
  19. 9 0
      test/libvirt/playbooks/templates/buildah_repo.sh
  20. 9 0
      test/libvirt/rhel_setup.sh
  21. 7 0
      test/libvirt/run_ansible.sh
  22. 49 0
      test/libvirt/terraform/bootstrap/README.md
  23. 40 0
      test/libvirt/terraform/bootstrap/main.tf
  24. 2 0
      test/libvirt/terraform/bootstrap/meta-data.tpl
  25. 8 0
      test/libvirt/terraform/bootstrap/user-data.tpl
  26. 25 0
      test/libvirt/terraform/bootstrap/variables.tf
  27. 62 0
      test/libvirt/terraform/config.tf
  28. 120 0
      test/libvirt/terraform/main.tf
  29. 16 0
      test/libvirt/terraform/terraform.tfvars.template
  30. 8 0
      test/libvirt/terraform/user-data.tpl
  31. 55 0
      test/libvirt/terraform/variables-libvirt.tf
  32. 4 0
      test/libvirt/terraform/volume/main.tf
  33. 3 0
      test/libvirt/terraform/volume/outputs.tf
  34. 9 0
      test/libvirt/terraform/volume/variables.tf
  35. 5 0
      test/libvirt/terraform_provision.sh

+ 10 - 0
.gitignore

@@ -24,3 +24,13 @@ multi_ec2.yaml
 *.egg-info
 .eggs
 cover/
+test/libvirt/install-config.yml
+test/libvirt/.openshift_install_state.json
+test/libvirt/.openshift_install.log
+test/libvirt/*.ign
+test/libvirt/install-config-ansible.yml
+test/libvirt/terraform/terraform.tfvars
+test/libvirt/terraform/.terraform
+*.tfstate
+*.tfstate.backup
+test/libvirt/inventory.txt

+ 23 - 7
playbooks/deploy_cluster_40.yml

@@ -5,7 +5,25 @@
     l_init_fact_hosts: "nodes"
     l_openshift_version_set_hosts: "nodes"
     l_install_base_packages: True
-    l_repo_hosts: "all:!all"
+    l_repo_hosts: "nodes"
+
+- name: Read in openshift-install
+  hosts: masters[0]
+  tasks:
+  - slurp:
+      src: "{{ openshift_install_config_path }}"
+    register: openshift_install_config_reg
+    delegate_to: localhost
+    run_once: True
+  - set_fact:
+      openshift_install_config: "{{ openshift_install_config_reg['content'] | b64decode | from_yaml }}"
+
+# We might need to access these values on each host later.
+- name: set_fact openshift_install_config across all nodes
+  hosts: nodes
+  tasks:
+  - set_fact:
+      openshift_install_config: "{{ hostvars[groups['masters'][0]].openshift_install_config }}"
 
 # TODO(michaelgugino): break up the rest of this file into reusable chunks.
 - name: Install nodes
@@ -41,10 +59,9 @@
 
 - name: Start masters
   hosts: masters
+  vars:
+    openshift_bootstrap_endpoint: "https://{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}:49500/config/master"
   tasks:
-  # This is required for openshift_node40/config.yml
-  - set_fact:
-      openshift_bootstrap_endpoint: "https://{{ openshift_master_cluster_hostname }}:{{ mcd_port }}/config/master"
   - name: Wait for bootstrap endpoint to show up
     uri:
       url: "{{ openshift_bootstrap_endpoint }}"
@@ -67,10 +84,9 @@
 
 - name: Start workers
   hosts: workers
+  vars:
+    openshift_bootstrap_endpoint: "https://{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}:49500/config/worker"
   tasks:
-  # This is required for openshift_node40/config.yml
-  - set_fact:
-      openshift_bootstrap_endpoint: "https://{{ openshift_master_cluster_hostname }}:{{ mcd_port }}/config/worker"
   - name: Wait for bootstrap endpoint to show up
     uri:
       url: "{{ openshift_bootstrap_endpoint }}"

+ 1 - 1
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -5,7 +5,7 @@
     groups:
     - bootstrap
     - nodes
-    ignition_file: "{{ openshift_bootstrap_ignition_file }}"
+    openshift_ignition_file_path: "{{ openshift_bootstrap_ignition_file }}"
   with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
 
 - name: Add master instances

+ 2 - 2
roles/openshift_node40/tasks/config.yml

@@ -10,8 +10,8 @@
   when: openshift_bootstrap_endpoint is defined
 
 - set_fact:
-    ign_contents: "{{ lookup('file', ignition_file) }}"
-  when: ignition_file is defined
+    ign_contents: "{{ lookup('file', openshift_ignition_file_path) }}"
+  when: openshift_ignition_file_path is defined
 
 - debug:
     var: ign_contents

+ 3 - 0
roles/openshift_node40/tasks/install.yml

@@ -10,6 +10,9 @@
   delay: 1
   vars:
     l_node_packages:
+    #- "atomic-openshift-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+    #- "atomic-openshift-clients{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+    #- "atomic-openshift-hyperkube{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     - "origin-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     - "origin-clients{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     - "origin-hyperkube{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"

+ 26 - 0
test/libvirt/README.md

@@ -0,0 +1,26 @@
+# Do Not Use
+
+Anything contained in this directory is unsupported and should not be used
+to provision any OpenShift clusters.  Please refer to official documentation
+for supported installation methods.
+
+## How to use
+Don't use it.
+
+clone https://github.com/openshift/aos-ansible/pull/74 to ~/git/aos-ansible
+(Red Hat use only)
+
+Ensure openshift-install and terraform are in your path.
+
+cd to this directory.
+
+source installrc; export variables you want to override.  You'll need to at least
+update what image you want to use unless you have that exact image in that exact
+place.
+
+./deploy.sh
+This will generate install assets (inventory, install-config.yml, tfvars),
+provision instances via terraform and start installation of
+openshift-ansible.
+
+Afterwards, you can cleanup with ./cleanup.sh

+ 5 - 0
test/libvirt/cleanup.sh

@@ -0,0 +1,5 @@
+#!/bin/bash
+rm .openshift* -f
+rm *.ign -f
+cd terraform/
+terraform destroy -auto-approve

+ 10 - 0
test/libvirt/deploy.sh

@@ -0,0 +1,10 @@
+#!/bin/bash
+#. installrc
+set -e
+./generate_assets.sh
+./terraform_provision.sh
+echo "sleeping 20"
+sleep 20
+./rhel_setup.sh
+openshift-install create ignition-configs
+./run_ansible.sh

+ 9 - 0
test/libvirt/generate_assets.sh

@@ -0,0 +1,9 @@
+#!/bin/bash
+# run this file directly via ./generate_assets.sh; sh generate_assets.sh won't work.
+
+./generate_inventory.sh
+cat install-config.yml.template | envsubst > install-config.yml
+# Need to make a copy for ansible to reference because install-config.yml is
+# consumed/deleted when we generate ignition configs.
+cp install-config.yml install-config-ansible.yml
+cat terraform/terraform.tfvars.template | envsubst > terraform/terraform.tfvars

+ 22 - 0
test/libvirt/generate_inventory.sh

@@ -0,0 +1,22 @@
+#!/bin/bash
+
+MASTERS_LIST="${OCP_CLUSTER_NAME}-master-0.${OCP_BASE_DOMAIN}"
+WORKERS_LIST=""
+NEW_LINE_SUB="__new_line__"
+
+
+# Generate masters for inventory
+for (( c=1; c<$OCP_MASTERS; c++ ))
+do
+    MASTERS_LIST="${MASTERS_LIST}${NEW_LINE_SUB}${OCP_CLUSTER_NAME}-master-${c}.${OCP_BASE_DOMAIN}"
+done
+
+# Generate workers for inventory
+for (( c=0; c<$OCP_WORKERS; c++ ))
+do
+    WORKERS_LIST="${WORKERS_LIST}${NEW_LINE_SUB}${OCP_CLUSTER_NAME}-worker-${c}.${OCP_BASE_DOMAIN}"
+done
+export WORKERS_LIST=$WORKERS_LIST
+export MASTERS_LIST=$MASTERS_LIST
+cat inv.txt.template | envsubst > inventory.txt
+sed -i "s/${NEW_LINE_SUB}/\n/g" inventory.txt

+ 10 - 0
test/libvirt/group_vars/nodes.yml

@@ -0,0 +1,10 @@
+---
+openshift_additional_repos:
+  - name: "origin-pr"
+    baseurl: "https://rpms.svc.ci.openshift.org/openshift-origin-v4.0/"
+    enabled: 1
+    gpgcheck: 0
+  - name: "origin-pr-dependencies"
+    baseurl: "https://cbs.centos.org/repos/paas7-openshift-origin311-testing/x86_64/os/"
+    enabled: 1
+    gpgcheck: 0

+ 28 - 0
test/libvirt/install-config.yml.template

@@ -0,0 +1,28 @@
+baseDomain: ${OCP_BASE_DOMAIN}
+clusterID:  ${OCP_CLUSTER_ID}
+machines:
+- name: master
+  replicas: ${OCP_MASTERS}
+- name: worker
+  replicas: ${OCP_WORKERS}
+metadata:
+  name: ${OCP_CLUSTER_NAME}
+networking:
+  clusterNetworks:
+  - cidr:             10.128.0.0/14
+    hostSubnetLength: 9
+  serviceCIDR: 172.30.0.0/16
+  type:        OpenshiftSDN
+platform:
+  libvirt:
+    URI: qemu+tcp://192.168.122.1/system
+    defaultMachinePlatform:
+      image: file:///unused
+    masterIPs: null
+    network:
+      if: ${OCP_NETDEV}
+      ipRange: ${OCP_NETCIDR}
+pullSecret: |
+  ${OCP_PULL_SECRET}
+sshKey: |
+  ${OCP_SSH_PUB_KEY}

+ 21 - 0
test/libvirt/installrc

@@ -0,0 +1,21 @@
+#!/bin/bash
+
+export OCP_BASE_DOMAIN="tt.testing"
+export OCP_CLUSTER_ID=$(uuidgen --random)
+export OCP_MASTERS=1
+export OCP_WORKERS=0
+export OCP_CLUSTER_NAME="byo-dev"
+export OCP_PULL_SECRET=$(cat ~/try.openshift.com.json)
+export OCP_SSH_PUB_KEY=$(cat ~/.ssh/id_rsa.pub)
+export OCP_IMAGE=~/images/rhel-guest-image-7.5-146.x86_64.qcow2
+export OCP_NETDEV=tt0
+export OCP_NETCIDR="192.168.126.0/24"
+# OCP_MASTERS is a list of quoted ips, no trailing comma.  eg:
+# "\"192.168.126.11\", \"192.168.126.12\""
+export OCP_MASTER_IPS=\"192.168.126.11\"
+export OCP_BOOTSTRAP_IP="192.168.126.10"
+export OCP_LIBVIRT_URI='qemu+tcp://192.168.124.1/system'
+export OCP_INSTALL_CONFIG_PATH="$PWD/install-config-ansible.yml"
+export OCP_IGNITION_PATH="$PWD/bootstrap.ign"
+
+export ANSIBLE_HOST_KEY_CHECKING=False

+ 24 - 0
test/libvirt/inv.txt.template

@@ -0,0 +1,24 @@
+[nodes:children]
+bootstrap
+masters
+workers
+
+[nodes:vars]
+ansible_ssh_user=cloud-user
+ansible_become=True
+
+openshift_install_config_path="${OCP_INSTALL_CONFIG_PATH}"
+openshift_deployment_type=origin
+openshift_release=v4.0
+
+[bootstrap]
+${OCP_CLUSTER_NAME}-bootstrap.${OCP_BASE_DOMAIN}
+
+[bootstrap:vars]
+openshift_ignition_file_path="${OCP_IGNITION_PATH}"
+
+[masters]
+${MASTERS_LIST}
+
+[workers]
+${WORKERS_LIST}

+ 8 - 0
test/libvirt/playbooks/files/openshift-local.repo

@@ -0,0 +1,8 @@
+[openshift-local]
+name=openshift-local
+baseurl=file:///root/rpms/
+enabled=1
+metadata_expire=7d
+repo_gpgcheck=0
+type=rpm
+gpgcheck=0

+ 14 - 0
test/libvirt/playbooks/localrepo.yml

@@ -0,0 +1,14 @@
+---
+- hosts: nodes
+  tasks:
+    - command: "{{ ansible_pkg_mgr }} install buildah -y"
+    - name: Transfer the buildah script
+      template:
+        src: buildah_repo.sh
+        dest: /root
+    - name: Execute buildah script
+      command: sh /root/buildah_repo.sh
+    - name: Create local repo file
+      copy:
+        src: openshift-local.repo
+        dest: /etc/yum.repos.d/openshift-local.repo

+ 15 - 0
test/libvirt/playbooks/prep.yml

@@ -0,0 +1,15 @@
+---
+- hosts: nodes
+  tasks:
+    - command: "dnf -y update"
+    - command: "dnf install NetworkManager -y"
+    - name: Start NetworkManager
+      command: systemctl start NetworkManager
+    - name: Enable NetworkManager
+      command: systemctl enable NetworkManager
+    - name: Install docker
+      command: "dnf install docker -y"
+    - name: Start Docker
+      command: systemctl start docker
+    - name: Enable docker
+      command: systemctl enable docker

+ 9 - 0
test/libvirt/playbooks/rhel_prep.yml

@@ -0,0 +1,9 @@
+---
+- hosts: nodes
+  tasks:
+    - command: "yum -y update"
+    - command: "yum install NetworkManager -y"
+    - name: Start NetworkManager
+      command: systemctl start NetworkManager
+    - name: Enable NetworkManager
+      command: systemctl enable NetworkManager

+ 9 - 0
test/libvirt/playbooks/templates/buildah_repo.sh

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Execute buildah to scrape rpms from container.
+
+cd /root
+cx=$(buildah from {{ easy_openshift_repo_image }})
+cx_root=$(buildah mount $cx)
+
+cp $cx_root/* . -r

+ 9 - 0
test/libvirt/rhel_setup.sh

@@ -0,0 +1,9 @@
+#!/bin/bash
+set -e
+APB3="`which python3` `which ansible-playbook`"
+WORKDIR=$PWD
+
+# Need system packages.
+$APB3 -i $WORKDIR/inventory.txt ~/git/aos-ansible/playbooks/aws_install_prep.yml
+$APB3 -i $WORKDIR/inventory.txt $WORKDIR/playbooks/rhel_prep.yml -vvv
+#$APB3 -i $WORKDIR/inventory.txt $WORKDIR/playbooks/localrepo.yml -vvv

+ 7 - 0
test/libvirt/run_ansible.sh

@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+APB3="`which python3` `which ansible-playbook`"
+WORKDIR=$PWD
+
+cd ../..
+$APB3 -vvv -i $WORKDIR/inventory.txt playbooks/deploy_cluster_40.yml

+ 49 - 0
test/libvirt/terraform/bootstrap/README.md

@@ -0,0 +1,49 @@
+# Bootstrap Module
+
+This [Terraform][] [module][] manages [libvirt][] resources only needed during cluster bootstrapping.
+It uses [implicit provider inheritance][implicit-provider-inheritance] to access the [libvirt provider][libvirt-provider].
+
+## Example
+
+Set up a `main.tf` with:
+
+```hcl
+provider "libvirt" {
+  uri = "qemu:///system"
+}
+
+resource "libvirt_network" "example" {
+  name   = "example"
+  mode   = "none"
+  domain = "example.com"
+  addresses = ["192.168.0.0/24"]
+}
+
+resource "libvirt_volume" "example" {
+  name   = "example"
+  source = "file:///path/to/example.qcow2"
+}
+
+module "bootstrap" {
+  source = "github.com/openshift/installer//data/data/libvirt/bootstrap"
+
+  addresses      = ["192.168.0.1"]
+  base_volume_id = "${libvirt_volume.example.id}"
+  cluster_name   = "my-cluster"
+  ignition       = "{\"ignition\": {\"version\": \"2.2.0\"}}",
+  network_id     = "${libvirt_network.example.id}"
+}
+```
+
+Then run:
+
+```console
+$ terraform init
+$ terraform plan
+```
+
+[libvirt]: https://libvirt.org/
+[libvirt-provider]: https://github.com/dmacvicar/terraform-provider-libvirt
+[implicit-provider-inheritance]: https://www.terraform.io/docs/modules/usage.html#implicit-provider-inheritance
+[module]: https://www.terraform.io/docs/modules/
+[Terraform]: https://www.terraform.io/

+ 40 - 0
test/libvirt/terraform/bootstrap/main.tf

@@ -0,0 +1,40 @@
+resource "libvirt_volume" "bootstrap" {
+  name           = "${var.cluster_name}-bootstrap"
+  base_volume_id = "${var.base_volume_id}"
+}
+
+data "template_file" "user_data" {
+  template = "${file("${path.module}/user-data.tpl")}"
+  vars {
+    ssh_authorized_keys = "${var.ssh_key}"
+  }
+}
+
+resource "libvirt_cloudinit_disk" "bootstrapinit" {
+  name           = "${var.cluster_name}-bs-init.iso"
+  user_data      = "${data.template_file.user_data.rendered}"
+}
+
+resource "libvirt_domain" "bootstrap" {
+  name = "${var.cluster_name}-bootstrap"
+
+  memory = "2048"
+
+  vcpu = "2"
+
+  cloudinit = "${libvirt_cloudinit_disk.bootstrapinit.id}"
+  disk {
+    volume_id = "${libvirt_volume.bootstrap.id}"
+  }
+
+  console {
+    type        = "pty"
+    target_port = 0
+  }
+
+  network_interface {
+    network_id = "${var.network_id}"
+    hostname   = "${var.cluster_name}-bootstrap"
+    addresses  = "${var.addresses}"
+  }
+}

+ 2 - 0
test/libvirt/terraform/bootstrap/meta-data.tpl

@@ -0,0 +1,2 @@
+instance-id: ${instance_id}
+local-hostname: ${instance_id}

+ 8 - 0
test/libvirt/terraform/bootstrap/user-data.tpl

@@ -0,0 +1,8 @@
+#cloud-config
+
+# add any ssh public keys
+ssh_authorized_keys:
+  - "${ssh_authorized_keys}"
+
+runcmd:
+  - "echo done"

+ 25 - 0
test/libvirt/terraform/bootstrap/variables.tf

@@ -0,0 +1,25 @@
+variable "addresses" {
+  type        = "list"
+  default     = []
+  description = "IP addresses to assign to the bootstrap node."
+}
+
+variable "base_volume_id" {
+  type        = "string"
+  description = "The ID of the base volume for the bootstrap node."
+}
+
+variable "cluster_name" {
+  type        = "string"
+  description = "The name of the cluster."
+}
+
+variable "network_id" {
+  type        = "string"
+  description = "The ID of a network resource containing the bootstrap node's addresses."
+}
+
+variable "ssh_key" {
+  type        = "string"
+  description = "ssh public key"
+}

+ 62 - 0
test/libvirt/terraform/config.tf

@@ -0,0 +1,62 @@
+terraform {
+  required_version = ">= 0.10.7"
+}
+
+variable "master_count" {
+  type    = "string"
+  default = "1"
+
+  description = <<EOF
+The number of master nodes to be created.
+This applies only to cloud platforms.
+EOF
+}
+
+variable "base_domain" {
+  type = "string"
+
+  description = <<EOF
+The base DNS domain of the cluster. It must NOT contain a trailing period. Some
+DNS providers will automatically add this if necessary.
+
+Example: `openshift.example.com`.
+
+Note: This field MUST be set manually prior to creating the cluster.
+This applies only to cloud platforms.
+EOF
+}
+
+variable "cluster_name" {
+  type = "string"
+
+  description = <<EOF
+The name of the cluster.
+If used in a cloud-environment, this will be prepended to `base_domain` resulting in the URL to the OpenShift console.
+
+Note: This field MUST be set manually prior to creating the cluster.
+EOF
+}
+
+variable "ignition_master" {
+  type    = "string"
+  default = ""
+
+  description = <<EOF
+(internal) Ignition config file contents. This is automatically generated by the installer.
+EOF
+}
+
+variable "ignition_bootstrap" {
+  type    = "string"
+  default = ""
+
+  description = <<EOF
+(internal) Ignition config file contents. This is automatically generated by the installer.
+EOF
+}
+
+// This variable is generated by OpenShift internally. Do not modify
+variable "cluster_id" {
+  type        = "string"
+  description = "(internal) The OpenShift cluster id."
+}

+ 120 - 0
test/libvirt/terraform/main.tf

@@ -0,0 +1,120 @@
+provider "libvirt" {
+  uri = "${var.libvirt_uri}"
+}
+
+module "volume" {
+  source = "./volume"
+
+  cluster_name = "${var.cluster_name}"
+  image        = "${var.os_image}"
+}
+
+module "bootstrap" {
+  source = "./bootstrap"
+
+  addresses      = ["${var.libvirt_bootstrap_ip}"]
+  base_volume_id = "${module.volume.coreos_base_volume_id}"
+  cluster_name   = "${var.cluster_name}"
+  network_id     = "${libvirt_network.net.id}"
+  ssh_key        = "${var.ssh_key}"
+}
+
+resource "libvirt_volume" "master" {
+  count          = "${var.master_count}"
+  name           = "${var.cluster_name}-master-${count.index}"
+  base_volume_id = "${module.volume.coreos_base_volume_id}"
+}
+
+resource "libvirt_network" "net" {
+  name = "${var.cluster_name}"
+
+  mode   = "nat"
+  bridge = "${var.libvirt_network_if}"
+
+  domain = "${var.base_domain}"
+
+  addresses = [
+    "${var.libvirt_ip_range}",
+  ]
+
+  dns = [{
+    local_only = true
+
+    srvs = ["${flatten(list(
+      data.libvirt_network_dns_srv_template.etcd_cluster.*.rendered,
+    ))}"]
+
+    hosts = ["${flatten(list(
+      data.libvirt_network_dns_host_template.bootstrap.*.rendered,
+      data.libvirt_network_dns_host_template.masters.*.rendered,
+      data.libvirt_network_dns_host_template.etcds.*.rendered,
+    ))}"]
+  }]
+
+  autostart = true
+}
+
+data "template_file" "user_data" {
+  template = "${file("${path.module}/user-data.tpl")}"
+  vars {
+    ssh_authorized_keys = "${var.ssh_key}"
+  }
+}
+
+resource "libvirt_cloudinit_disk" "commoninit" {
+  name           = "${var.cluster_name}-master-init.iso"
+  user_data      = "${data.template_file.user_data.rendered}"
+}
+
+resource "libvirt_domain" "master" {
+  count = "${var.master_count}"
+
+  name = "${var.cluster_name}-master-${count.index}"
+
+  memory = "${var.libvirt_master_memory}"
+  vcpu   = "${var.libvirt_master_vcpu}"
+
+  cloudinit = "${libvirt_cloudinit_disk.commoninit.id}"
+  disk {
+    volume_id = "${element(libvirt_volume.master.*.id, count.index)}"
+  }
+
+  console {
+    type        = "pty"
+    target_port = 0
+  }
+
+  network_interface {
+    network_id = "${libvirt_network.net.id}"
+    hostname   = "${var.cluster_name}-master-${count.index}"
+    addresses  = ["${var.libvirt_master_ips[count.index]}"]
+  }
+}
+
+data "libvirt_network_dns_host_template" "bootstrap" {
+  count    = "${var.bootstrap_dns ? 1 : 0}"
+  ip       = "${var.libvirt_bootstrap_ip}"
+  hostname = "${var.cluster_name}-api"
+}
+
+data "libvirt_network_dns_host_template" "masters" {
+  count    = "${var.master_count}"
+  ip       = "${var.libvirt_master_ips[count.index]}"
+  hostname = "${var.cluster_name}-api"
+}
+
+data "libvirt_network_dns_host_template" "etcds" {
+  count    = "${var.master_count}"
+  ip       = "${var.libvirt_master_ips[count.index]}"
+  hostname = "${var.cluster_name}-etcd-${count.index}"
+}
+
+data "libvirt_network_dns_srv_template" "etcd_cluster" {
+  count    = "${var.master_count}"
+  service  = "etcd-server-ssl"
+  protocol = "tcp"
+  domain   = "${var.cluster_name}.${var.base_domain}"
+  port     = 2380
+  weight   = 10
+  target   = "${var.cluster_name}-etcd-${count.index}.${var.base_domain}"
+}

+ 16 - 0
test/libvirt/terraform/terraform.tfvars.template

@@ -0,0 +1,16 @@
+{
+  "cluster_id": "${OCP_CLUSTER_ID}",
+  "cluster_name": "${OCP_CLUSTER_NAME}",
+  "base_domain": "${OCP_BASE_DOMAIN}",
+  "master_count": ${OCP_MASTERS},
+
+"libvirt_uri": "${OCP_LIBVIRT_URI}",
+"os_image": "${OCP_IMAGE}",
+"libvirt_network_if": "${OCP_NETDEV}",
+"libvirt_ip_range": "${OCP_NETCIDR}",
+"libvirt_master_ips": [
+  ${OCP_MASTER_IPS}
+],
+"libvirt_bootstrap_ip": "${OCP_BOOTSTRAP_IP}",
+"ssh_key": "${OCP_SSH_PUB_KEY}"
+}

+ 8 - 0
test/libvirt/terraform/user-data.tpl

@@ -0,0 +1,8 @@
+#cloud-config
+
+# add any ssh public keys
+ssh_authorized_keys:
+  - "${ssh_authorized_keys}"
+
+runcmd:
+  - echo "done"

+ 55 - 0
test/libvirt/terraform/variables-libvirt.tf

@@ -0,0 +1,55 @@
+variable "bootstrap_dns" {
+  default     = true
+  description = "Whether to include DNS entries for the bootstrap node or not."
+}
+
+variable "libvirt_uri" {
+  type        = "string"
+  description = "libvirt connection URI"
+}
+
+variable "libvirt_network_if" {
+  type        = "string"
+  description = "The name of the bridge to use"
+}
+
+variable "libvirt_ip_range" {
+  type        = "string"
+  description = "IP range for the libvirt machines"
+}
+
+variable "os_image" {
+  type        = "string"
+  description = "The URL of the OS disk image"
+}
+
+variable "libvirt_bootstrap_ip" {
+  type        = "string"
+  description = "the desired bootstrap ip"
+}
+
+variable "libvirt_master_ips" {
+  type        = "list"
+  description = "the list of desired master ips. Must match master_count"
+}
+
+# It's definitely recommended to bump this if you can.
+variable "libvirt_master_memory" {
+  type        = "string"
+  description = "RAM in MiB allocated to masters"
+  default     = "4096"
+}
+
+# At some point this one is likely to default to the number
+# of physical cores you have.  See also
+# https://pagure.io/standard-test-roles/pull-request/223
+variable "libvirt_master_vcpu" {
+  type        = "string"
+  description = "CPUs allocated to masters"
+  default     = "2"
+}
+
+variable "ssh_key" {
+  type        = "string"
+  description = "ssh public key"
+}

+ 4 - 0
test/libvirt/terraform/volume/main.tf

@@ -0,0 +1,4 @@
+resource "libvirt_volume" "coreos_base" {
+  name   = "${var.cluster_name}-base"
+  source = "${var.image}"
+}

+ 3 - 0
test/libvirt/terraform/volume/outputs.tf

@@ -0,0 +1,3 @@
+output "coreos_base_volume_id" {
+  value = "${libvirt_volume.coreos_base.id}"
+}

+ 9 - 0
test/libvirt/terraform/volume/variables.tf

@@ -0,0 +1,9 @@
+variable "cluster_name" {
+  type        = "string"
+  description = "The name of the cluster."
+}
+
+variable "image" {
+  description = "The URL of the OS disk image"
+  type        = "string"
+}

+ 5 - 0
test/libvirt/terraform_provision.sh

@@ -0,0 +1,5 @@
+#!/bin/bash
+
+cd terraform
+terraform init
+terraform apply -auto-approve