Rework playbooks to set up 4.0 on GCP

Vadim Rutkovsky · 6 years ago · commit ab0e91ed85
55 changed files with 940 additions and 1381 deletions
  1. +2 -2  images/installer/Dockerfile
  2. +0 -6  images/installer/origin-extra-root/etc/yum.repos.d/centos-ansible26.repo
  3. +1 -1  images/installer/root/usr/local/bin/entrypoint-provider
  4. +3 -0  inventory/dynamic/gcp/group_vars/all/00_defaults.yml
  5. +1 -1  inventory/dynamic/gcp/hosts.sh
  6. +92 -0  playbooks/bootkube.yml
  7. +141 -0  playbooks/deploy_cluster_40.yml
  8. +0 -14  playbooks/gcp/OWNERS
  9. +0 -186  playbooks/gcp/openshift-cluster/build_base_image.yml
  10. +0 -36  playbooks/gcp/openshift-cluster/install.yml
  11. +0 -12  playbooks/gcp/openshift-cluster/install_gcp.yml
  12. +0 -23  playbooks/gcp/openshift-cluster/openshift_node_group.yml
  13. +0 -9  playbooks/gcp/openshift-cluster/publish_image.yml
  14. +0 -1  playbooks/gcp/openshift-cluster/roles
  15. +1 -1  requirements.txt
  16. +5 -0  roles/container_runtime/tasks/crio_firewall.yml
  17. +0 -2  roles/container_runtime/tasks/docker_storage_setup_overlay.yml
  18. +10 -5  roles/lib_utils/action_plugins/parse_ignition.py
  19. +6 -5  roles/lib_utils/test/test_parse_ignition.py
  20. +1 -1  roles/openshift_facts/defaults/main.yml
  21. +98 -12  roles/openshift_gcp/defaults/main.yml
  22. +0 -20  roles/openshift_gcp/tasks/add_custom_repositories.yml
  23. +0 -10  roles/openshift_gcp/tasks/configure_gcp_base_image.yml
  24. +0 -40  roles/openshift_gcp/tasks/configure_master_bootstrap.yml
  25. +6 -0  roles/openshift_gcp/tasks/configure_master_healthcheck.yml
  26. +145 -0  roles/openshift_gcp/tasks/deprovision.yml
  27. +9 -2  roles/openshift_gcp/tasks/dynamic_inventory.yml
  28. +252 -37  roles/openshift_gcp/tasks/main.yml
  29. +4 -1  roles/openshift_gcp/tasks/provision_ssh_keys.yml
  30. +0 -32  roles/openshift_gcp/tasks/publish_image.yml
  31. +93 -0  roles/openshift_gcp/tasks/remove_bootstrap.yml
  32. +15 -28  roles/openshift_gcp/tasks/setup_scale_group_facts.yml
  33. +0 -13  roles/openshift_gcp/templates/dns.j2.sh
  34. +2 -2  roles/openshift_gcp/templates/master_healthcheck.j2
  35. +0 -7  roles/openshift_gcp/templates/openshift-bootstrap-update.j2
  36. +0 -304  roles/openshift_gcp/templates/provision.j2.sh
  37. +0 -179  roles/openshift_gcp/templates/remove.j2.sh
  38. +0 -20  roles/openshift_gcp/templates/yum_repo.j2
  39. +11 -3  roles/openshift_node40/tasks/config.yml
  40. +3 -1  roles/openshift_node40/tasks/create_files_from_ignition.yml
  41. +3 -1  roles/openshift_node40/tasks/systemd.yml
  42. +0 -7  test/ci/README.md
  43. +0 -45  test/ci/deprovision.yml
  44. +0 -113  test/ci/inventory/group_vars/OSEv3/vars.yml
  45. +0 -112  test/ci/launch.yml
  46. +0 -26  test/ci/template-inventory.j2
  47. +0 -46  test/ci/vars.yml.sample
  48. +5 -13  playbooks/gcp/openshift-cluster/build_image.yml
  49. +1 -2  playbooks/gcp/openshift-cluster/deprovision.yml
  50. +29 -0  test/gcp/install.yml
  51. +0 -0  test/gcp/inventory.yml
  52. +0 -0  test/gcp/launch.yml
  53. +0 -0  test/gcp/provision.yml
  54. +1 -0  test/gcp/roles
  55. +0 -0  test/gcp/upgrade.yml

+ 2 - 2
images/installer/Dockerfile

@@ -10,13 +10,13 @@ COPY images/installer/origin-extra-root /
 # install ansible and deps
 RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl python2-passlib httpd-tools openssh-clients origin-clients iproute patch" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible-2.6.5 python2-boto python2-boto3 python2-crypto which python2-pip.noarch python2-scandir python2-packaging azure-cli-2.0.46" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 python2-crypto which python2-pip.noarch python2-scandir python2-packaging azure-cli-2.0.46" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
  && if [ "$(uname -m)" == "x86_64" ]; then yum install -y https://sdodson.fedorapeople.org/google-cloud-sdk-183.0.0-3.el7.x86_64.rpm ; fi \
  && yum install -y java-1.8.0-openjdk-headless \
  && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
- && pip install 'apache-libcloud~=2.2.1' 'SecretStorage<3' 'ansible[azure]' \
+ && pip install 'apache-libcloud~=2.2.1' 'SecretStorage<3' 'ansible[azure]' 'google-auth' \
  && yum clean all
 
 LABEL name="openshift/origin-ansible" \

+ 0 - 6
images/installer/origin-extra-root/etc/yum.repos.d/centos-ansible26.repo

@@ -1,6 +0,0 @@
-
-[centos-ansible26-testing]
-name=CentOS Ansible 2.6 testing repo
-baseurl=https://cbs.centos.org/repos/configmanagement7-ansible-26-testing/x86_64/os/
-enabled=1
-gpgcheck=0

+ 1 - 1
images/installer/root/usr/local/bin/entrypoint-provider

@@ -45,7 +45,7 @@ if [[ -f "${FILES}/ssh-privatekey" ]]; then
   else
     keyfile="${HOME}/.ssh/id_rsa"
   fi
-  mkdir "${HOME}/.ssh"
+  mkdir -p "${HOME}/.ssh"
   rm -f "${keyfile}"
   cat "${FILES}/ssh-privatekey" > "${keyfile}"
   chmod 0600 "${keyfile}"

+ 3 - 0
inventory/dynamic/gcp/group_vars/all/00_defaults.yml

@@ -20,6 +20,9 @@ openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_z
 openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
 openshift_master_default_subdomain: "{{ wildcard_zone }}"
 
+mcd_port: 49500
+mcd_endpoint: "{{ openshift_master_cluster_public_hostname }}:{{ mcd_port }}"
+
 # Cloud specific settings
 openshift_cloudprovider_kind: gce
 openshift_hosted_registry_storage_provider: gcs
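
The two defaults added above feed the config-serving endpoint that playbooks/deploy_cluster_40.yml later polls for master and worker ignition configs. As a worked example only (example.com is a placeholder, not part of this commit), a public_hosted_zone of example.com would expand to:

    openshift_master_cluster_public_hostname: openshift-master.example.com
    mcd_endpoint: openshift-master.example.com:49500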

+ 1 - 1
inventory/dynamic/gcp/hosts.sh

@@ -5,7 +5,7 @@ set -euo pipefail
 # Use a playbook to calculate the inventory dynamically from
 # the provided cluster variables.
 src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml 2>&1 )"; then
+if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../test/gcp/inventory.yml 2>&1 )"; then
   echo "error: Inventory configuration failed" 1>&2
   echo "$out" 1>&2
   echo "{}"

+ 92 - 0
playbooks/bootkube.yml

@@ -0,0 +1,92 @@
+---
+# Generate config using openshift-installer, set Base Domain to testing.tt
+# Add bootstrap host in [bootstrap] group and set ignition_file
+# Add master host to [masters] group
+# Add worker hosts in [workers] group
+# Make sure bootstrap has <clusterid>-api.<dns base> name
+# Make sure masters have <clusterid>-etcd-<index>.<dns base> name
+
+# FIXME: use dnsmasq to fake DNS entries
+
+- import_playbook: init/main.yml
+  vars:
+    l_install_base_packages: True
+    l_repo_hosts: "all:!all"
+
+# TODO: proper firewalld setup
+# 49500 on bootstrap; 2379, 6443, 10250 on masters, 10250 on workers
+
+- import_playbook: container-runtime/private/setup_storage.yml
+
+- import_playbook: container-runtime/private/config.yml
+
+- name: install nodes
+  hosts: nodes
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+
+- name: Config bootstrap node
+  hosts: bootstrap
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Start masters
+  hosts: masters
+  tasks:
+  # TODO Read this from master's ignition file
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ bootstrap }}:49500/config/master?etcd_index={{ index }}"
+    vars:
+      bootstrap: "{{ hostvars[groups['bootstrap'][0]]['ansible_host'] }}"
+      index: "{{ groups['masters'].index(inventory_hostname) }}"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - name: Make sure etcd user exists
+    user:
+      name: etcd
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Start workers
+  hosts: workers
+  tasks:
+  # TODO Read this from master's ignition file
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ bootstrap }}:49500/config/worker"
+    vars:
+      bootstrap: "{{ hostvars[groups['bootstrap'][0]]['ansible_host'] }}"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
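
The header comments describe the inventory this new playbook expects but do not show one. A minimal sketch of such an inventory, with hypothetical hostnames, addresses, and ignition path (none of which come from this commit):

    [bootstrap]
    testcluster-api.testing.tt ansible_host=10.0.0.5 ignition_file=/path/to/bootstrap.ign

    [masters]
    testcluster-etcd-0.testing.tt ansible_host=10.0.0.10

    [workers]
    testcluster-worker-0.testing.tt ansible_host=10.0.0.20

    [nodes:children]
    bootstrap
    masters
    workers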

+ 141 - 0
playbooks/deploy_cluster_40.yml

@@ -0,0 +1,141 @@
+---
+- name: run the init
+  import_playbook: init/main.yml
+  vars:
+    l_init_fact_hosts: "nodes"
+    l_openshift_version_set_hosts: "nodes"
+    l_install_base_packages: True
+    l_repo_hosts: "all:!all"
+
+# TODO(michaelgugino): break up the rest of this file into reusable chunks.
+- name: Install nodes
+  hosts: nodes
+  roles:
+  - role: container_runtime
+  tasks:
+  - import_role:
+      name: container_runtime
+      tasks_from: docker_storage_setup_overlay.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: extra_storage_setup.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: package_crio.yml
+  - name: FIXME pause_image
+    ini_file:
+      dest: "/etc/crio/crio.conf"
+      section: crio.image
+      option: pause_image
+      value: '"docker.io/openshift/origin-pod:v4.0"'
+  - name: FIXME restart crio
+    service:
+      name: crio
+      state: restarted
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+
+- name: Config bootstrap node
+  hosts: bootstrap
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+    vars:
+      excluded_services:
+      - progress.service
+
+- name: Start masters
+  hosts: masters
+  tasks:
+  # This is required for openshift_node40/config.yml
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/master"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - name: Make sure etcd user exists
+    user:
+      name: etcd
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Start workers
+  hosts: workers
+  tasks:
+  # This is required for openshift_node40/config.yml
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/worker"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Wait for nodes to become ready
+  hosts: bootstrap
+  tasks:
+  - name: Wait for temporary control plane to show up
+    #TODO: Rework with k8s module
+    oc_obj:
+      state: list
+      kind: pod
+      namespace: kube-system
+      kubeconfig: /opt/tectonic/auth/kubeconfig
+    register: control_plane_pods
+    retries: 60
+    delay: 10
+    until:
+    - "'results' in control_plane_pods and 'results' in control_plane_pods.results"
+    - control_plane_pods.results.results[0]['items'] | length > 0
+  - name: Wait for master nodes to show up
+    #TODO: Rework with k8s module
+    oc_obj:
+      state: list
+      kind: node
+      selector: "node-role.kubernetes.io/master"
+      kubeconfig: /opt/tectonic/auth/kubeconfig
+    register: master_nodes
+    retries: 60
+    delay: 10
+    until:
+    - "'results' in master_nodes and 'results' in master_nodes.results"
+    - master_nodes.results.results[0]['items'] | length > 0
+  - name: Wait for bootkube service to finish
+    service_facts: {}
+    #10 mins to complete temp plane
+    retries: 120
+    delay: 5
+    until: "'bootkube.service' not in ansible_facts.services"
+    ignore_errors: true
+  - name: Fetch kubeconfig for test container
+    fetch:
+      src: /opt/tectonic/auth/kubeconfig
+      dest: /tmp/artifacts/installer/auth/kubeconfig
+      flat: yes
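
This playbook is meant to be driven by the GCP dynamic inventory updated elsewhere in this commit; an illustrative invocation (the exact flags are an assumption, not taken from CI configuration) would be:

    ansible-playbook -i inventory/dynamic/gcp/hosts.sh playbooks/deploy_cluster_40.yml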

+ 0 - 14
playbooks/gcp/OWNERS

@@ -1,14 +0,0 @@
-# approval == this is a good idea /approve
-approvers:
-  - smarterclayton
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs
-# review == this code is good /lgtm
-reviewers:
-  - smarterclayton
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs

+ 0 - 186
playbooks/gcp/openshift-cluster/build_base_image.yml

@@ -1,186 +0,0 @@
----
-# This playbook ensures that a base image is up to date with all of the required settings
-- name: Verify prerequisites for image build
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - name: Require openshift_gcp_root_image
-    fail:
-      msg: "A root OS image name or family is required for base image building.  Please ensure `openshift_gcp_root_image` is defined."
-    when: openshift_gcp_root_image is undefined
-
-- name: Provision ssh key
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - name: Set up core host GCP configuration
-    import_role:
-      name: openshift_gcp
-      tasks_from: provision_ssh_keys.yml
-
-- name: Launch image build instance
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - name: Create the image instance disk
-    gce_pd:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      name: "{{ openshift_gcp_prefix }}build-image-instance"
-      disk_type: pd-ssd
-      image: "{{ openshift_gcp_root_image }}"
-      size_gb: 10
-      state: present
-
-  - name: Launch the image build instance
-    gce:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      machine_type: n1-standard-1
-      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
-      state: present
-      tags:
-      - build-image-instance
-      disk_auto_delete: false
-      disks:
-      - "{{ openshift_gcp_prefix }}build-image-instance"
-    register: gce
-
-  - add_host:
-      hostname: "{{ item.public_ip }}"
-      groupname: build_instance_ips
-    with_items: "{{ gce.instance_data }}"
-
-  - name: Wait for instance to respond to SSH
-    wait_for:
-      delay: 1
-      host: "{{ item.public_ip }}"
-      port: 22
-      state: started
-      timeout: 120
-    with_items: "{{ gce.instance_data }}"
-
-- name: Prepare instance content sources
-  pre_tasks:
-  - set_fact:
-      allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
-  - set_fact:
-      using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
-  hosts: build_instance_ips
-  roles:
-  - role: rhel_subscribe
-    when: using_rhel_subscriptions
-  - role: openshift_repos
-    vars:
-      openshift_additional_repos: []
-  post_tasks:
-  - name: Add custom repositories
-    include_role:
-      name: openshift_gcp
-      tasks_from: add_custom_repositories.yml
-  - name: Add the Google Cloud repo
-    yum_repository:
-      name: google-cloud
-      description: Google Cloud Compute
-      baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
-      gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-      gpgcheck: yes
-      repo_gpgcheck: yes
-      state: present
-    when: ansible_os_family == "RedHat"
-  - name: Add the jdetiber-qemu-user-static copr repo
-    yum_repository:
-      name: jdetiber-qemu-user-static
-      description: QEMU user static COPR
-      baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
-      gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
-      gpgcheck: yes
-      repo_gpgcheck: no
-      state: present
-    when: ansible_os_family == "RedHat"
-  - name: Accept GPG keys for the repos
-    command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
-  - name: Install qemu-user-static
-    package:
-      name: qemu-user-static
-      state: present
-  - name: Disable yum-cron service (installed by Google Cloud by default)
-    systemd:
-      name: yum-cron
-      state: stopped
-      enabled: no
-  - name: Start and enable systemd-binfmt service
-    systemd:
-      name: systemd-binfmt
-      state: started
-      enabled: yes
-
-- name: Build image
-  hosts: build_instance_ips
-  pre_tasks:
-  - name: Set up core host GCP configuration
-    include_role:
-      name: openshift_gcp
-      tasks_from: configure_gcp_base_image.yml
-  roles:
-  - role: os_update_latest
-  post_tasks:
-  - name: Disable all repos on RHEL
-    command: subscription-manager repos --disable="*"
-    when: using_rhel_subscriptions
-  - name: Enable repos for packages on RHEL
-    command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
-    when: using_rhel_subscriptions
-  - name: Install common image prerequisites
-    package:
-      name: "{{ pkg_list | join(',') }}"
-      state: latest
-    vars:
-      pkg_list:
-      # required by Ansible
-      - PyYAML
-      - google-compute-engine
-      - google-compute-engine-init
-      - google-config
-      - wget
-      - git
-      - net-tools
-      - bind-utils
-      - iptables-services
-      - bridge-utils
-      - bash-completion
-  - name: Clean yum metadata
-    command: yum clean all
-    args:
-      warn: no
-    when: ansible_os_family == "RedHat"
-
-- name: Commit image
-  hosts: localhost
-  connection: local
-  tasks:
-  - name: Terminate the image build instance
-    gce:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
-      state: absent
-  - name: Save the new image
-    command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
-  - name: Remove the image instance disk
-    gce_pd:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      name: "{{ openshift_gcp_prefix }}build-image-instance"
-      state: absent

+ 0 - 36
playbooks/gcp/openshift-cluster/install.yml

@@ -1,36 +0,0 @@
-# This playbook installs onto a provisioned cluster
----
-- hosts: localhost
-  connection: local
-  tasks:
-  - name: place all scale groups into Ansible groups
-    include_role:
-      name: openshift_gcp
-      tasks_from: setup_scale_group_facts.yml
-
-- name: run the init
-  import_playbook: ../../init/main.yml
-
-- import_playbook: ../../openshift-checks/private/install.yml
-
-- name: ensure master nodes are ready for bootstrapping
-  import_playbook: ../../openshift-node/private/bootstrap.yml
-
-- name: configure the control plane
-  import_playbook: ../../common/private/control_plane.yml
-
-- name: run the GCP specific post steps
-  import_playbook: install_gcp.yml
-
-- name: install components
-  import_playbook: ../../common/private/components.yml
-
-- name: Copy the kubeconfig, used by CI to determine when the containers are ready
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - name: Retrieve cluster configuration
-    fetch:
-      src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
-      dest: "/tmp/"
-      flat: yes

+ 0 - 12
playbooks/gcp/openshift-cluster/install_gcp.yml

@@ -1,12 +0,0 @@
----
-- hosts: masters
-  gather_facts: no
-  tasks:
-  - name: create master health check service
-    include_role:
-      name: openshift_gcp
-      tasks_from: configure_master_healthcheck.yml
-  - name: configure master bootstrap distribution
-    include_role:
-      name: openshift_gcp
-      tasks_from: configure_master_bootstrap.yml

+ 0 - 23
playbooks/gcp/openshift-cluster/openshift_node_group.yml

@@ -1,23 +0,0 @@
-# This playbook installs onto a provisioned cluster
----
-- hosts: localhost
-  connection: local
-  tasks:
-  - name: place all scale groups into Ansible groups
-    include_role:
-      name: openshift_gcp
-      tasks_from: setup_scale_group_facts.yml
-    vars:
-      all_nodes: true
-
-- import_playbook: ../../init/main.yml
-  vars:
-    l_init_fact_hosts: "oo_masters_to_config"
-    l_openshift_version_set_hosts: "all:!all"
-    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
-
-- name: Setup node-group configmaps
-  hosts: oo_first_master
-  tasks:
-  - import_role:
-      name: openshift_node_group

+ 0 - 9
playbooks/gcp/openshift-cluster/publish_image.yml

@@ -1,9 +0,0 @@
----
-- name: Publish the most recent image
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - import_role:
-      name: openshift_gcp
-      tasks_from: publish_image.yml

+ 0 - 1
playbooks/gcp/openshift-cluster/roles

@@ -1 +0,0 @@
-../../../roles

+ 1 - 1
requirements.txt

@@ -1,6 +1,6 @@
 # Versions are pinned to prevent pypi releases arbitrarily breaking
 # tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.6.5
+ansible==2.7.1
 boto==2.44.0
 click==6.7
 pyOpenSSL==17.5.0

+ 5 - 0
roles/container_runtime/tasks/crio_firewall.yml

@@ -1,6 +1,11 @@
 ---
 - when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool
   block:
+  - name: Make sure iptables-services is installed
+    package:
+      name: iptables-services
+      state: present
+
   - name: Add iptables allow rules
     os_firewall_manage_iptables:
       name: "{{ item.service }}"

+ 0 - 2
roles/container_runtime/tasks/docker_storage_setup_overlay.yml

@@ -6,5 +6,3 @@
     owner: root
     group: root
     mode: 0664
-  when:
-  - container_runtime_docker_storage_type == 'overlay2'

+ 10 - 5
roles/lib_utils/action_plugins/parse_ignition.py

@@ -2,10 +2,9 @@
 
 import base64
 import os
-
-from ansible.plugins.action import ActionBase
-from ansible import errors
+import six
 from six.moves import urllib
+from ansible.plugins.action import ActionBase
 
 
 # pylint: disable=too-many-function-args
@@ -29,7 +28,8 @@ def get_file_data(encoded_contents):
 # pylint: disable=too-many-function-args
 def get_files(files_dict, systemd_dict, dir_list, data):
     """parse data to populate file_dict"""
-    for item in data['storage']['files']:
+    files = data.get('storage', []).get('files', [])
+    for item in files:
         path = item["path"]
         dir_list.add(os.path.dirname(path))
         # remove prefix "data:,"
@@ -41,8 +41,12 @@ def get_files(files_dict, systemd_dict, dir_list, data):
         inode = {"contents": contents, "mode": mode}
         files_dict[path] = inode
         # get the systemd unit files while we're here
-    for item in data['systemd']['units']:
+    systemd_units = data.get('systemd', []).get('units', [])
+    for item in systemd_units:
         contents = item['contents']
+        if six.PY2:
+            # pylint: disable=redefined-variable-type
+            contents = contents.decode('unicode-escape')
         mode = "0644"
         inode = {"contents": contents, "mode": mode}
         name = item['name']
@@ -53,6 +57,7 @@ def get_files(files_dict, systemd_dict, dir_list, data):
         systemd_dict[name] = enabled
 
 
+# pylint: disable=too-few-public-methods
 class ActionModule(ActionBase):
     """ActionModule for parse_ignition.py"""
 

+ 6 - 5
roles/lib_utils/test/test_parse_ignition.py

@@ -7,9 +7,10 @@ import sys
 
 MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'action_plugins'))
 sys.path.insert(0, MODULE_PATH)
+ASSET_PATH = os.path.realpath(os.path.join(__file__, os.pardir, 'test_data'))
 
 # pylint: disable=import-error,wrong-import-position,missing-docstring
-import parse_ignition # noqa: E402
+import parse_ignition  # noqa: E402
 
 
 def read_ign(path):
@@ -25,7 +26,7 @@ def write_out_files(files_dict):
 
 
 def test_parse_json():
-    ign_data = read_ign('test_data/example.ign.json')
+    ign_data = read_ign(os.path.join(ASSET_PATH, 'example.ign.json'))
     files_dict = {}
     systemd_dict = {}
     dir_list = set()
@@ -36,7 +37,7 @@ def test_parse_json():
 
 
 def test_parse_json_encoded_files():
-    ign_data = read_ign('test_data/bootstrap.ign.json')
+    ign_data = read_ign(os.path.join(ASSET_PATH, 'bootstrap.ign.json'))
     files_dict = {}
     systemd_dict = {}
     dir_list = set()
@@ -44,11 +45,11 @@ def test_parse_json_encoded_files():
     result['files_dict'] = files_dict
     result['systemd_dict'] = systemd_dict
     parse_ignition.get_files(files_dict, systemd_dict, dir_list, ign_data)
-    #print(files_dict['/opt/tectonic/manifests/cluster-config.yaml']['contents'])
+    # print(files_dict['/opt/tectonic/manifests/cluster-config.yaml']['contents'])
 
 
 def parse_json2():
-    ign_data = read_ign('test_data/bs.ign.json')
+    ign_data = read_ign(os.path.join(ASSET_PATH, 'bs.ign.json'))
     files_dict = {}
     systemd_dict = {}
     dir_list = set()

+ 1 - 1
roles/openshift_facts/defaults/main.yml

@@ -136,7 +136,7 @@ openshift_service_type_dict:
   openshift-enterprise: atomic-openshift
 
 openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
-openshift_master_api_port: "8443"
+openshift_master_api_port: "6443"
 openshift_ca_host: "{{ groups.oo_first_master.0 }}"
 openshift_use_openshift_sdn: true
 os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"

+ 98 - 12
roles/openshift_gcp/defaults/main.yml

@@ -31,21 +31,21 @@ openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry"
 openshift_gcp_master_dns_ttl: 300
 
 openshift_gcp_node_group_config:
-  - name: master
-    suffix: m
-    tags: ocp-master
+  - name: bootstrap
+    suffix: b
+    tags: ocp-bootstrap ocp-node
     machine_type: n1-standard-2
     boot_disk_size: 150
     scale: 1
-  - name: infra
-    suffix: i
-    tags: ocp-infra-node ocp-node
+  - name: master
+    suffix: m
+    tags: ocp-master ocp-node
     machine_type: n1-standard-2
     boot_disk_size: 150
     scale: 1
-  - name: node
+  - name: worker
     suffix: n
-    tags: ocp-node
+    tags: ocp-worker ocp-node
     machine_type: n1-standard-2
     boot_disk_size: 150
     scale: 3
@@ -61,7 +61,93 @@ openshift_gcp_user_data_file: ''
 
 openshift_gcp_multizone: False
 
-openshift_gcp_node_group_mapping:
-  masters: 'node-config-master'
-  infra: 'node-config-infra'
-  compute: 'node-config-compute'
+provision_custom_repositories: []
+
+mcd_port: 49500
+openshift_gcp_kubernetes_api_port: 6443
+openshift_gcp_master_healthcheck_port: 8080
+
+openshift_gcp_firewall_rules:
+  - rule: icmp
+    allowed:
+      - ip_protocol: 'icmp'
+  - rule: ssh-external
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '22'
+  - rule: ssh-internal
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '22'
+    source_tags:
+      - ssh-bastion
+  - rule: master-internal
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '2224'
+          - '2379'
+          - '2380'
+          - '4001'
+          - "{{ openshift_gcp_kubernetes_api_port }}"
+          - "{{ internal_console_port }}"
+          - '8053'
+          - '8444'
+          - "{{ openshift_gcp_master_healthcheck_port }}"
+          - '10250'
+          - '10255'
+          - '24224'
+          - "{{ mcd_port }}"
+      - ip_protocol: 'udp'
+        ports:
+          - '4789'
+          - '5404'
+          - '5405'
+          - '10255'
+          - '24224'
+    source_tags:
+      - ocp
+    target_tags:
+      - ocp-master
+      - ocp-bootstrap
+  - rule: master-external
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - "{{ openshift_gcp_master_healthcheck_port }}"
+          - "{{ openshift_gcp_kubernetes_api_port }}"
+          - "{{ openshift_master_api_port }}"
+          - "{{ mcd_port }}"
+    target_tags:
+      - ocp-master
+      - ocp-bootstrap
+  - rule: node-internal
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '1936'
+          - '10250'
+          - '10255'
+          - '9000-10000'
+      - ip_protocol: 'udp'
+        ports:
+          - '4789'
+          - '10255'
+    source_tags:
+      - ocp
+    target_tags:
+      - ocp-worker
+  - rule: node-external
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '80'
+          - '443'
+          - "{{ openshift_node_port_range }}"
+      - ip_protocol: 'udp'
+        ports:
+          - "{{ openshift_node_port_range }}"
+    target_tags:
+      - ocp-worker

+ 0 - 20
roles/openshift_gcp/tasks/add_custom_repositories.yml

@@ -1,20 +0,0 @@
----
-- name: Copy custom repository secrets
-  copy:
-    src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
-    dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
-  when: item.1.sslclientcert | default(false)
-  with_indexed_items: "{{ provision_custom_repositories }}"
-- name: Copy custom repository secrets
-  copy:
-    src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
-    dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
-  when: item.1.sslclientkey | default(false)
-  with_indexed_items: "{{ provision_custom_repositories }}"
-
-- name: Create any custom repos that are defined
-  template:
-    src: yum_repo.j2
-    dest: /etc/yum.repos.d/provision_custom_repositories.repo
-  when: provision_custom_repositories | length > 0
-  notify: refresh cache

+ 0 - 10
roles/openshift_gcp/tasks/configure_gcp_base_image.yml

@@ -1,10 +0,0 @@
-# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
----
-- name: Remove barrier=1 from XFS fstab entries
-  command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
-
-- name: Ensure the root filesystem has XFS group quota turned on
-  command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
-
-- name: Ensure the root partition grows on startup
-  copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/

+ 0 - 40
roles/openshift_gcp/tasks/configure_master_bootstrap.yml

@@ -1,40 +0,0 @@
-#
-# These tasks configure the instance to periodically update the project metadata with the
-# latest bootstrap kubeconfig from the project metadata. This keeps the project metadata
-# in sync with the cluster's configuration. We then invoke a CSR approve on any nodes that
-# are waiting to join the cluster.
-#
----
-- name: Copy unit service
-  copy:
-    src: openshift-bootstrap-update.timer
-    dest: /etc/systemd/system/openshift-bootstrap-update.timer
-    owner: root
-    group: root
-    mode: 0664
-
-- name: Copy unit timer
-  copy:
-    src: openshift-bootstrap-update.service
-    dest: /etc/systemd/system/openshift-bootstrap-update.service
-    owner: root
-    group: root
-    mode: 0664
-
-- name: Create bootstrap update script
-  template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
-
-- name: Start bootstrap update timer
-  systemd:
-    name: "openshift-bootstrap-update.timer"
-    state: started
-
-- name: Approve node certificates when bootstrapping
-  oc_csr_approve:
-    oc_bin: "{{ hostvars[groups.masters.0]['first_master_client_binary'] }}"
-    oc_conf: "{{ hostvars[groups.masters.0].openshift.common.config_base }}/master/admin.kubeconfig"
-    node_list: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
-  register: gcp_csr_approve
-  retries: 30
-  until: gcp_csr_approve is succeeded
-  when: groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list | length > 0

+ 6 - 0
roles/openshift_gcp/tasks/configure_master_healthcheck.yml

@@ -24,3 +24,9 @@
     name: haproxy
     state: started
     enabled: yes
+
+- name: allow haproxy to connect to any port
+  seboolean:
+    name: haproxy_connect_any
+    state: yes
+    persistent: yes
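
The seboolean task added here is the Ansible equivalent of flipping the boolean by hand, which can be handy when debugging a master where SELinux blocks haproxy from reaching the API backend:

    # manual equivalent of the task above; -P makes it persistent across reboots
    setsebool -P haproxy_connect_any 1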

+ 145 - 0
roles/openshift_gcp/tasks/deprovision.yml

@@ -0,0 +1,145 @@
+---
+- name: Fetch instance group managers
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - "name : {{ openshift_gcp_prefix }}ig*"
+  register: instance_group_managers
+
+- name: Fetch instance templates
+  gcp_compute_instance_template_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    filters:
+    - "name : {{ openshift_gcp_prefix }}instance-template*"
+  register: instance_templates
+
+- name: Collect a list of instances
+  gcp_compute_instance_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+  register: all_instances
+
+- name: Filter instances to fetch masters
+  set_fact:
+    master_instances: "{{ master_instances | default([]) }} + [ {{ item }} ]"
+  with_items:
+  - "{{ all_instances['items'] }}"
+  when:
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-master' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
+
+- name: Get managed zone
+  gcp_dns_managed_zone:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+    state: present
+  register: managed_zone
+
+- name: Remove public API hostname
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_master_cluster_public_hostname }}."
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    state: absent
+
+- name: Remove etcd discovery record
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "_etcd-server-ssl._tcp.{{ lookup('env', 'INSTANCE_PREFIX') | mandatory }}.{{ public_hosted_zone }}."
+    managed_zone: "{{ managed_zone }}"
+    type: SRV
+    state: absent
+
+- name: Remove etcd records for masters
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ entry_name }}"
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    state: absent
+  with_indexed_items: "{{ master_instances }}"
+  when: master_instances is defined
+  vars:
+    entry_name: "{{ openshift_gcp_prefix }}etcd-{{ item.0 }}.{{ public_hosted_zone }}."
+
+- name: Remove GCP Instance Groups
+  gcp_compute_instance_group_manager:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    name: "{{ item[0].name }}"
+    base_instance_name: "{{ item[0].name }}"
+    instance_template: "{{ item[1] }}"
+    state: absent
+  with_nested:
+  - "{{ instance_group_managers['items'] }}"
+  - "{{ instance_templates['items'] }}"
+
+- name: Remove GCP instance templates
+  gcp_compute_instance_template:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ item.name }}"
+    state: absent
+  with_items: "{{ instance_templates['items'] }}"
+
+- name: Remove GCP firewall
+  gcp_compute_firewall:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_prefix }}{{ item.rule }}"
+    state: absent
+  with_items: "{{ openshift_gcp_firewall_rules }}"
+
+- name: Remove GCP network
+  gcp_compute_network:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_network_name }}"
+    state: absent

+ 9 - 2
roles/openshift_gcp/tasks/dynamic_inventory.yml

@@ -1,5 +1,12 @@
 ---
 - name: Extract PEM from service account file
-  copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+  copy:
+    content: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}"
+    dest: /tmp/gce.pem
+    mode: 0600
+
 - name: Templatize environment script
-  template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
+  template:
+    src: inventory.j2.sh
+    dest: /tmp/inventory.sh
+    mode: u+rx

+ 252 - 37
roles/openshift_gcp/tasks/main.yml

@@ -1,45 +1,260 @@
-#
-# This role relies on gcloud invoked via templated bash in order to
-# provide a high performance deployment option. The next logical step
-# is to transition to a deployment manager template which is then instantiated.
-# TODO: use a formal set of role parameters consistent with openshift_aws
-#
 ---
-- name: Templatize DNS script
-  template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
-- name: Templatize provision script
-  template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
-- name: Templatize de-provision script
-  template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
-  when:
-  - state | default('present') == 'absent'
+- name: Create GCP network
+  gcp_compute_network:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_network_name }}"
+    state: present
+  register: network
 
-- name: Provision GCP DNS domain
-  command: /tmp/openshift_gcp_provision_dns.sh
-  args:
-    chdir: "{{ files_dir }}"
-  register: dns_provision
-  when:
-  - state | default('present') == 'present'
-
-- name: Ensure that DNS resolves to the hosted zone
-  assert:
-    that:
-    - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
-    msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
-  when:
-  - state | default('present') == 'present'
+- name: Create GCP firewall
+  gcp_compute_firewall:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_prefix }}{{ item.rule }}"
+    allowed: "{{ item.allowed }}"
+    network: "{{ network.selfLink }}"
+    target_tags: "{{ item.target_tags | default(omit) }}"
+    source_tags: "{{ item.source_tags | default(omit) }}"
+    state: present
+  with_items: "{{ openshift_gcp_firewall_rules }}"
 
 - import_tasks: provision_ssh_keys.yml
 
-- name: Provision GCP resources
-  command: /tmp/openshift_gcp_provision.sh
-  args:
-    chdir: "{{ files_dir }}"
+- name: Find GCP image
+  gcp_compute_image_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    filters:
+    - "family = {{ openshift_gcp_image }}"
+  register: gcp_node_image
+
+- fail:
+    msg: "No images for family '{{ openshift_gcp_image }}' found"
+  when: gcp_node_image['items'] | length == 0
+
+- name: Provision GCP instance templates
+  gcp_compute_instance_template:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_prefix }}instance-template-{{ item.name }}"
+    properties:
+      machine_type: "{{ item.machine_type }}"
+      network_interfaces:
+      - network: "{{ network }}"
+        access_configs:
+        - name: "{{ openshift_gcp_prefix }}instance-template-{{ item.name }}-config"
+          type: 'ONE_TO_ONE_NAT'
+      disks:
+      - auto_delete: true
+        boot: true
+        initialize_params:
+          disk_size_gb: "{{ item.boot_disk_size }}"
+          source_image: "{{ gcp_node_image['items'][0].selfLink }}"
+      metadata:
+        "cluster-id": "{{ openshift_gcp_prefix + openshift_gcp_clusterid }}"
+        "node-group": "{{ item.name }}"
+      tags:
+        items:
+        - "ocp"
+        - "{{ openshift_gcp_prefix }}ocp"
+        - "{{ item.tags }}"
+    state: present
+  with_items: "{{ openshift_gcp_node_group_config }}"
+  register: instance_template
+
+- name: Create GCP Instance Groups
+  gcp_compute_instance_group_manager:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    name: "{{ openshift_gcp_prefix }}ig-{{ item.item.suffix }}"
+    base_instance_name: "{{ openshift_gcp_prefix }}ig-{{ item.item.suffix }}"
+    instance_template: "{{ item }}"
+    target_size: "{{ item.item.scale | int}}"
+    named_ports:
+    - name: "{{ openshift_gcp_prefix }}port-kube-api"
+      port: "{{ openshift_gcp_kubernetes_api_port }}"
+    - name: "{{ openshift_gcp_prefix }}port-openshift-api"
+      port: "{{ openshift_master_api_port }}"
+    state: present
+  with_items: "{{ instance_template.results }}"
+  register: instance_groups
+
+- name: Get bootstrap instance group
+  gcp_compute_instance_group_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - name = "{{ openshift_gcp_prefix }}ig-b"
+  register: bootstrap_instance_group
+
+- name: Get master instance group
+  gcp_compute_instance_group_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - name = "{{ openshift_gcp_prefix }}ig-m"
+  register: master_instance_group
+
+- set_fact:
+    bootstrap_instance_group: "{{ bootstrap_instance_group['items'][0] }}"
+    master_instance_group: "{{ master_instance_group['items'][0] }}"
+
+- name: Wait for bootstrap instance group to start all instances
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters: "name = {{ bootstrap_instance_group['name'] }}"
+  register: bootstrap_group_result
+  # Wait for 3 minutes
+  retries: 36
+  delay: 5
+  until:
+  - "bootstrap_group_result['items'][0]['currentActions']['none'] == bootstrap_group_result['items'][0]['targetSize']"
+
+- name: Wait for master instance group to start all instances
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters: "name = {{ master_instance_group['name'] }}"
+  register: master_group_result
+  # Wait for 3 minutes
+  retries: 36
+  delay: 5
+  until:
+  - "master_group_result['items'][0]['currentActions']['none'] == master_group_result['items'][0]['targetSize']"
+
+- name: Collect a list of instances
+  gcp_compute_instance_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+  register: all_instances
+
+- name: Filter instances to fetch bootstrap
+  set_fact:
+    bootstrap_instances: "{{ item }}"
+  with_items:
+  - "{{ all_instances['items'] }}"
   when:
-  - state | default('present') == 'present'
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-bootstrap' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
 
-- name: De-provision GCP resources
-  command: /tmp/openshift_gcp_provision_remove.sh
+- name: Filter instances to fetch masters
+  set_fact:
+    master_instances: "{{ master_instances | default([]) }} + [ {{ item }} ]"
+  with_items:
+  - "{{ all_instances['items'] }}"
   when:
-  - state | default('present') == 'absent'
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-master' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
+
+- set_fact:
+    etcd_discovery_targets: "{{ etcd_discovery_targets | default([]) }} + ['0 0 2380 {{ entry_name }}']"
+    master_external_ips: "{{ master_external_ips | default([]) }} + ['{{ master_ip }}']"
+  with_indexed_items: "{{ master_instances }}"
+  vars:
+    entry_name: "{{ openshift_gcp_prefix }}etcd-{{ item.0 }}.{{ public_hosted_zone }}."
+    master_ip: "{{ item.1.networkInterfaces[0].accessConfigs[0].natIP }}"
+
+- set_fact:
+    bootstrap_and_masters: "{{ master_external_ips | list }} + ['{{ bootstrap_instances.networkInterfaces[0].accessConfigs[0].natIP }}']"
+
+- name: Get managed zone
+  gcp_dns_managed_zone:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+    state: present
+  register: managed_zone
+
+- name: Create public API hostname
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_master_cluster_public_hostname }}."
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    ttl: 600
+    target: "{{ bootstrap_and_masters }}"
+    state: present
+
+- name: Create etcd records for masters
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ entry_name }}"
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    target: "{{ master_ip }}"
+    state: present
+  with_indexed_items: "{{ master_instances }}"
+  vars:
+    entry_name: "{{ openshift_gcp_prefix }}etcd-{{ item.0 }}.{{ public_hosted_zone }}."
+    master_ip: "{{ item.1.networkInterfaces[0].networkIP }}"
+
+- name: Create etcd discovery entry
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "_etcd-server-ssl._tcp.{{ lookup('env', 'INSTANCE_PREFIX') | mandatory }}.{{ public_hosted_zone }}."
+    managed_zone: "{{ managed_zone }}"
+    type: SRV
+    ttl: 600
+    target: "{{ etcd_discovery_targets }}"
+    state: present
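
The rewritten tasks above drive provisioning entirely through the gcp_compute_* and gcp_dns_* modules using service-account auth. A sketch of the variables a caller would need to supply; every value below is a placeholder, not taken from this commit:

    openshift_gcp_iam_service_account_keyfile: /path/to/serviceaccount.json
    openshift_gcp_project: my-project
    openshift_gcp_zone: us-east1-c
    openshift_gcp_prefix: ci-
    openshift_gcp_clusterid: testcluster
    openshift_gcp_network_name: ci-network
    openshift_gcp_image: my-image-family      # matched by family in the image lookup
    public_hosted_zone: example.com
    openshift_master_cluster_public_hostname: openshift-master.example.com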

+ 4 - 1
roles/openshift_gcp/tasks/provision_ssh_keys.yml

@@ -1,6 +1,9 @@
 ---
 - name: Templatize SSH key provision script
-  template: src=provision_ssh.j2.sh dest=/tmp/openshift_gcp_provision_ssh.sh mode=u+rx
+  template:
+    src: provision_ssh.j2.sh
+    dest: /tmp/openshift_gcp_provision_ssh.sh
+    mode: u+rx
 
 - name: Provision GCP SSH key resources
   command: /tmp/openshift_gcp_provision_ssh.sh

+ 0 - 32
roles/openshift_gcp/tasks/publish_image.yml

@@ -1,32 +0,0 @@
----
-- name: Require openshift_gcp_image
-  fail:
-    msg: "A source image name or family is required for image publishing.  Please ensure `openshift_gcp_image` is defined."
-  when: openshift_gcp_image is undefined
-
-- name: Require openshift_gcp_target_image
-  fail:
-    msg: "A target image name or family is required for image publishing.  Please ensure `openshift_gcp_target_image` is defined."
-  when: openshift_gcp_target_image is undefined
-
-- block:
-  - name: Retrieve images in the {{ openshift_gcp_target_image }} family
-    command: >
-      gcloud --project "{{ openshift_gcp_project }}" compute images list
-        "--filter=family={{ openshift_gcp_target_image }}"
-        --format=json --sort-by ~creationTimestamp
-    register: images
-  - name: Prune oldest images
-    command: >
-      gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}"
-    with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}"
-  when: openshift_gcp_keep_images is defined
-
-- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }}
-  command: >
-    gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}"
-      beta compute images create
-      "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}"
-      --family "{{ openshift_gcp_target_image }}"
-      --source-image-family "{{ openshift_gcp_image }}"
-      --source-image-project "{{ openshift_gcp_project }}"

+ 93 - 0
roles/openshift_gcp/tasks/remove_bootstrap.yml

@@ -0,0 +1,93 @@
+---
+- name: Get bootstrap instance group
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - name = "{{ openshift_gcp_prefix }}ig-b"
+  register: bootstrap_instance_group
+
+- name: Get bootstrap instance template
+  gcp_compute_instance_template_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    filters:
+    - "name : {{ openshift_gcp_prefix }}instance-template-bootstrap"
+  register: bootstrap_instance_template
+
+- name: Collect a list of instances
+  gcp_compute_instance_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+  register: all_instances
+
+- name: Filter instances to fetch masters
+  set_fact:
+    master_instances: "{{ master_instances | default([]) }} + [ {{ item }} ]"
+  with_items:
+  - "{{ all_instances['items'] }}"
+  when:
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-master' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
+
+- set_fact:
+    master_external_ips: "{{ master_external_ips | default([]) }}  + [ '{{ master_ip }}' ]"
+  with_indexed_items: "{{ master_instances }}"
+  vars:
+    master_ip: "{{ item.1.networkInterfaces[0].accessConfigs[0].natIP }}"
+
+- name: Get a managed zone
+  gcp_dns_managed_zone:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+    state: present
+  register: managed_zone
+
+- name: Update public API hostname
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+      - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_master_cluster_public_hostname }}."
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    ttl: 600
+    target: "{{ master_external_ips }}"
+    state: present
+
+- name: Delete bootstrap instance group
+  gcp_compute_instance_group_manager:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    name: "{{ bootstrap_instance_group['items'][0]['name'] }}"
+    base_instance_name: "{{ bootstrap_instance_group['items'][0]['baseInstanceName'] }}"
+    instance_template: "{{ bootstrap_instance_template['items'][0] }}"
+    state: absent
+  when:
+  - bootstrap_instance_group['items'] | length > 0
+  - bootstrap_instance_template['items'] | length > 0

+ 15 - 28
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -1,38 +1,25 @@
 ---
-- name: Set var to exclude bootstrapped nodes
-  set_fact:
-    bootstrapped_nodes: "{{ all_nodes | default(false) | ternary([], groups['tag_ocp-bootstrap']) | default([]) }}"
-
-- name: Add node instances to node group
+- name: Add bootstrap instances
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: nodes, new_nodes
-    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['compute'] }}"
-  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(bootstrapped_nodes) }}"
-
-- name: Add bootstrap node instances as nodes
-  add_host:
-    name: "{{ item }}"
-    groups: nodes, new_nodes
+    groups:
+    - bootstrap
+    - nodes
+    ignition_file: "{{ openshift_bootstrap_ignition_file }}"
   with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
-  when: all_nodes | default(False)
-
-- name: Add non-bootstrapping master node instances to node group
-  add_host:
-    name: "{{ hostvars[item].gce_name }}"
-    groups: nodes
-  with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(bootstrapped_nodes) }}"
 
-- name: Add infra node instances to node group
+- name: Add master instances
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: nodes, new_nodes
-    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['infra'] }}"
-  with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(bootstrapped_nodes) }}"
+    groups:
+    - masters
+    - nodes
+  with_items: "{{ groups['tag_ocp-master'] | default([]) }}"
 
-- name: Add masters to requisite groups
+- name: Add worker instances
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: masters, etcd
-    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['masters'] }}"
-  with_items: "{{ groups['tag_ocp-master'] }}"
+    groups:
+    - workers
+    - nodes
+  with_items: "{{ groups['tag_ocp-worker'] | default([]) }}"

+ 0 - 13
roles/openshift_gcp/templates/dns.j2.sh

@@ -1,13 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
-
-# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
-if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
-fi
-
-# Always output the expected nameservers as a comma delimited list
-gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','

+ 2 - 2
roles/openshift_gcp/templates/master_healthcheck.j2

@@ -60,9 +60,9 @@ defaults
 #---------------------------------------------------------------------
 # main frontend which proxys to the backends
 #---------------------------------------------------------------------
-frontend  http-proxy *:8080
+frontend  http-proxy *:{{ openshift_gcp_master_healthcheck_port }}
     acl          url_healthz  path_beg  -i /healthz
     use_backend  ocp          if url_healthz
 
 backend ocp
-    server       ocp localhost:{{ internal_console_port }} ssl verify none
+    server       ocp localhost:{{ openshift_gcp_kubernetes_api_port }} ssl verify none
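
The healthcheck template no longer hard-codes the 3.x ports: the haproxy frontend now listens on openshift_gcp_master_healthcheck_port and proxies /healthz to the local API server on openshift_gcp_kubernetes_api_port. A quick probe of the rendered proxy (run in a play against the masters, delegated to the control host); the 8080 fallback here is an assumption, check the role defaults for the real value, and the GCP firewall must allow the port:

- name: Probe the master healthcheck proxy (illustrative)
  uri:
    url: "http://{{ ansible_host }}:{{ openshift_gcp_master_healthcheck_port | default(8080) }}/healthz"
    status_code: 200
  delegate_to: localhost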

+ 0 - 7
roles/openshift_gcp/templates/openshift-bootstrap-update.j2

@@ -1,7 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
-gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
-rm -f /root/bootstrap.kubeconfig

+ 0 - 304
roles/openshift_gcp/templates/provision.j2.sh

@@ -1,304 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-metadata=""
-if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
-    if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
-        echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
-        exit 1
-    fi
-    metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
-fi
-if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
-    if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
-        echo "User data file missing at {{ openshift_gcp_user_data_file }}"
-        exit 1
-    fi
-    if [[ -n "${metadata}" ]]; then
-        metadata+=","
-    else
-        metadata="--metadata-from-file="
-    fi
-    metadata+="user-data={{ openshift_gcp_user_data_file }}"
-fi
-
-# Select image or image family
-image="{{ openshift_gcp_image }}"
-if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
-        echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'"
-        exit 1
-    fi
-    image="family/${image}"
-fi
-
-### PROVISION THE INFRASTRUCTURE ###
-
-dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
-
-# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
-if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
-    echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
-    exit 1
-fi
-
-# Create network
-if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
-else
-    echo "Network '{{ openshift_gcp_network_name }}' already exists"
-fi
-
-# Firewall rules in a form:
-# ['name']='parameters for "gcloud compute firewall-rules create"'
-# For all possible parameters see: gcloud compute firewall-rules create --help
-range=""
-if [[ -n "{{ openshift_node_port_range }}" ]]; then
-    range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
-fi
-declare -A FW_RULES=(
-  ['icmp']='--allow icmp'
-  ['ssh-external']='--allow tcp:22'
-  ['ssh-internal']='--allow tcp:22 --source-tags bastion'
-  ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
-  ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
-  ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255,tcp:9000-10000 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
-  ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
-  ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
-)
-for rule in "${!FW_RULES[@]}"; do
-    ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
-    else
-        echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
-    fi ) &
-done
-
-
-# Master IP
-( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
-else
-    echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
-fi ) &
-
-# Internal master IP
-( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
-else
-    echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
-fi ) &
-
-# Router IP
-( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
-else
-    echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
-fi ) &
-
-
-{% for node_group in openshift_gcp_node_group_config %}
-# configure {{ node_group.name }}
-(
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
-                --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
-                --tags "{{ openshift_gcp_prefix }}ocp,ocp,ocp-bootstrap,{{ node_group.tags }}" \
-                --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
-                --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
-                --image "{{ node_group.image | default('${image}') }}" ${metadata}  \
-                --metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}"
-    else
-        echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
-    fi
-
-    # Create instance group
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
-                --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
-    else
-        echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
-    fi
-) &
-{% endfor %}
-
-for i in `jobs -p`; do wait $i; done
-
-
-# Configure the master external LB rules
-(
-# Master health check
-if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
-else
-    echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
-fi
-
-gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
-        --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"
-
-# Master backend service
-if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
-    gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
-else
-    echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
-fi
-
-# Master tcp proxy target
-if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
-else
-    echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
-fi
-
-# Master forwarding rule
-if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
-    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
-    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
-else
-    echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
-fi
-) &
-
-
-# Configure the master internal LB rules
-(
-# Internal master health check
-if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
-else
-    echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
-fi
-
-# Internal master target pool
-if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
-else
-    echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
-fi
-
-# Internal master forwarding rule
-if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
-else
-    echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
-fi
-) &
-
-
-# Configure the infra node rules
-(
-# Router health check
-if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
-else
-    echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
-fi
-
-# Router target pool
-if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
-else
-    echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
-fi
-
-# Router forwarding rule
-if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
-else
-    echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
-fi
-) &
-
-for i in `jobs -p`; do wait $i; done
-
-# set the target pools
-(
-if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
-    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
-else
-    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
-    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
-fi
-) &
-
-# configure DNS
-(
-# Retry DNS changes until they succeed since this may be a shared resource
-while true; do
-    dns="${TMPDIR:-/tmp}/dns.yaml"
-    rm -f $dns
-
-    # DNS record for master lb
-    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
-        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
-        if [[ ! -f $dns ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-        fi
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
-    else
-        echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
-    fi
-
-    # DNS record for internal master lb
-    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
-        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-        if [[ ! -f $dns ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-        fi
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
-    else
-        echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
-    fi
-
-    # DNS record for router lb
-    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
-        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-        if [[ ! -f $dns ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-        fi
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "{{ wildcard_zone }}." --type A "$IP"
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
-    else
-        echo "DNS record for '{{ wildcard_zone }}' already exists"
-    fi
-
-    # Commit all DNS changes, retrying if preconditions are not met
-    if [[ -f $dns ]]; then
-        if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
-            rc=$?
-            if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
-                continue
-            fi
-            exit $rc
-        fi
-    fi
-    break
-done
-) &
-
-# Create bucket for registry
-(
-if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
-    gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
-else
-    echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
-fi
-) &
-
-# wait until all node groups are stable
-{% for node_group in openshift_gcp_node_group_config %}
-{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %}
-# wait for stable {{ node_group.name }}
-( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
-{% else %}
-# not waiting for {{ node_group.name }} due to bootstrapping
-{% endif %}
-{% endfor %}
-
-
-for i in `jobs -p`; do wait $i; done

+ 0 - 179
roles/openshift_gcp/templates/remove.j2.sh

@@ -1,179 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-function teardown_cmd() {
-    a=( $@ )
-    local name=$1
-    a=( "${a[@]:1}" )
-    local flag=0
-    local found=
-    for i in ${a[@]}; do
-        if [[ "$i" == "--"* ]]; then
-            found=true
-            break
-        fi
-        flag=$((flag+1))
-    done
-    if [[ -z "${found}" ]]; then
-      flag=$((flag+1))
-    fi
-    if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
-    fi
-}
-
-function teardown() {
-    for i in `seq 1 20`; do
-        if teardown_cmd $@; then
-            break
-        fi
-        sleep 0.5
-    done
-}
-
-# Preemptively spin down the instances
-{% for node_group in openshift_gcp_node_group_config %}
-# scale down {{ node_group.name }}
-(
-    # performs a delete and scale down as one operation to ensure maximum parallelism
-    if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' 2>/dev/null ); then
-        exit 0
-    fi
-    instances="${instances%?}"
-    if [[ -z "${instances}" ]]; then
-        echo "warning: No instances in {{ node_group.name }}" 1>&2
-        exit 0
-    fi
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then
-        echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
-        exit 0
-    fi
-) &
-{% endfor %}
-
-# Bucket for registry
-(
-if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
-    gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}"
-fi
-) &
-
-# Project metadata prefixed with {{ openshift_gcp_prefix }}
-(
-    for key in $( gcloud --project "{{ openshift_gcp_project }}" compute project-info describe --flatten=commonInstanceMetadata.items[] '--format=value(commonInstanceMetadata.items.key)' ); do
-        if [[ "${key}" == "{{ openshift_gcp_prefix }}"* ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" compute project-info remove-metadata "--keys=${key}"
-        fi
-    done
-) &
-
-# Instances and disks used for image building
-(
-    teardown "{{ openshift_gcp_prefix }}build-image-instance" compute instances --zone "{{ openshift_gcp_zone }}"
-    teardown "{{ openshift_gcp_prefix }}build-image-instance" compute disks --zone "{{ openshift_gcp_zone }}"
-) &
-
-# DNS
-(
-dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
-if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
-    # Retry DNS changes until they succeed since this may be a shared resource
-    while true; do
-        dns="${TMPDIR:-/tmp}/dns.yaml"
-        rm -f "${dns}"
-
-        # export all dns records that match into a zone format, and turn each line into a set of args for
-        # record-sets transaction.
-        gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}"
-        if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
-                awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
-        then
-            rm -f "${dns}"
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-            cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
-
-            # Commit all DNS changes, retrying if preconditions are not met
-            if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
-                rc=$?
-                if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
-                    continue
-                fi
-                exit $rc
-            fi
-        fi
-        rm "${dns}.input"
-        break
-    done
-fi
-) &
-
-(
-# Router network rules
-teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks
-teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
-
-# Internal master network rules
-teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks
-teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
-) &
-
-(
-# Master SSL network rules
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks
-) &
-
-# Firewall rules
-(
-    if ! firewalls=$( gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules list --filter="network~projects/{{ openshift_gcp_project }}/global/networks/{{ openshift_gcp_network_name }}" --format="value[terminator=' '](name)" 2>/dev/null ); then
-        exit 0
-    fi
-    firewalls="${firewalls%?}"
-    if [[ -z "${firewalls}" ]]; then
-        exit 0
-    fi
-    gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules delete -q ${firewalls}
-) &
-
-for i in `jobs -p`; do wait $i; done
-
-{% for node_group in openshift_gcp_node_group_config %}
-# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
-(
-    teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}"
-    teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
-) &
-{% endfor %}
-
-for i in `jobs -p`; do wait $i; done
-
-# Images specifically located under this cluster prefix family
-for name in $( gcloud --project "{{ openshift_gcp_project }}" compute images list "--filter=family={{ openshift_gcp_prefix }}images" '--format=value(name)' ); do
-    ( gcloud --project "{{ openshift_gcp_project }}" compute images delete "${name}" ) &
-done
-
-# Disks
-(
-    if ! disks=$( gcloud --project "{{ openshift_gcp_project }}" compute disks list --filter="users~projects/{{ openshift_gcp_project }}/zones/{{ openshift_gcp_zone }}/instances/{{ openshift_gcp_prefix }}.*" --format="value[terminator=' '](name)" 2>/dev/null ); then
-        exit 0
-    fi
-    disks="${disks%?}"
-    if [[ -z "${disks}" ]]; then
-        echo "warning: No disks in use by {{ openshift_gcp_prefix }}" 1>&2
-        exit 0
-    fi
-    gcloud --project "{{ openshift_gcp_project }}" compute disks delete -q "${disks}"
-) &
-
-# Network
-( teardown "{{ openshift_gcp_network_name }}" compute networks ) &
-
-for i in `jobs -p`; do wait $i; done

+ 0 - 20
roles/openshift_gcp/templates/yum_repo.j2

@@ -1,20 +0,0 @@
-{% for repo in provision_custom_repositories %}
-[{{ repo.id | default(repo.name) }}]
-name={{ repo.name | default(repo.id) }}
-baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default(1) %}
-enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default(1) %}
-gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
-{% if 'sslclientcert' in repo %}
-sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }}
-{% endif %}
-{% if 'sslclientkey' in repo %}
-sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }}
-{% endif %}
-{% for key, value in repo.iteritems() %}
-{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %}
-{{ key }}={{ value }}
-{% endif %}
-{% endfor %}
-{% endfor %}

+ 11 - 3
roles/openshift_node40/tasks/config.yml

@@ -1,16 +1,24 @@
 ---
-
 - name: get worker ignition file
   command: >
     curl -k {{ openshift_bootstrap_endpoint }}
   register: l_worker_bootstrap
+  when: openshift_bootstrap_endpoint is defined
+
+- set_fact:
+    ign_contents: "{{ l_worker_bootstrap.stdout }}"
+  when: openshift_bootstrap_endpoint is defined
+
+- set_fact:
+    ign_contents: "{{ lookup('file', ignition_file) }}"
+  when: ignition_file is defined
 
 - debug:
-    var: l_worker_bootstrap.stdout
+    var: ign_contents
 
 - name: parse ignition file
   parse_ignition:
-    ign_file_contents: "{{ l_worker_bootstrap.stdout }}"
+    ign_file_contents: "{{ ign_contents }}"
   register: l_parse_ignition_res
 
 - import_tasks: create_files_from_ignition.yml
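
The node config tasks now take ignition content either from a bootstrap endpoint (fetched with curl -k) or from a local file passed as ignition_file. The same fetch could be written with the uri module, which avoids shelling out and keeps the insecure-TLS choice explicit; a sketch of that alternative, not what the commit does:

- name: get worker ignition file
  uri:
    url: "{{ openshift_bootstrap_endpoint }}"
    validate_certs: no
    return_content: yes
  register: l_worker_bootstrap
  when: openshift_bootstrap_endpoint is defined

- set_fact:
    ign_contents: "{{ l_worker_bootstrap.content }}"
  when: openshift_bootstrap_endpoint is defined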

+ 3 - 1
roles/openshift_node40/tasks/create_files_from_ignition.yml

@@ -1,6 +1,8 @@
 ---
 - name: Create all the directories we will need
-  command: "mkdir -p {{ item }}"
+  file:
+    path: "{{ item }}"
+    state: directory
   with_items: "{{ l_parse_ignition_dict.dir_list }}"
 
 - name: create files from ignition contents
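
Moving from command: mkdir -p to the file module makes directory creation idempotent and check-mode friendly. The loop is fed by the parsed ignition result; a hypothetical shape of that structure, where only the key name dir_list comes from the task above and the paths are invented:

l_parse_ignition_dict:
  dir_list:
  - /etc/kubernetes
  - /etc/kubernetes/manifests
  - /opt/openshift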

+ 3 - 1
roles/openshift_node40/tasks/systemd.yml

@@ -1,7 +1,8 @@
 ---
 
 - name: daemon reload
-  command: "systemctl daemon-reload"
+  systemd:
+    daemon_reload: yes
 
 # dictionary of kv pairs, servicename: enabled, eg:
 # {'kubernetes': "true"}
@@ -11,3 +12,4 @@
     state: "{{ 'restarted' if (item.value | bool) else 'stopped' }}"
     enabled: "{{ item.value | bool }}"
   with_dict: "{{ l_parse_ignition_res.systemd_dict }}"
+  when: item.key not in excluded_services | default([])
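
Unit handling is driven by systemd_dict (unit name mapped to an enabled flag) from the parsed ignition, and the new excluded_services guard lets a caller leave specific units untouched. Illustrative values only; the unit names are not taken from a real ignition file:

l_parse_ignition_res:
  systemd_dict:
    kubelet.service: "true"
    bootkube.service: "false"

# e.g. leave one unit alone when invoking the play:
#   ansible-playbook ... -e '{"excluded_services": ["bootkube.service"]}'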

+ 0 - 7
test/ci/README.md

@@ -1,7 +0,0 @@
-* Copy `test/ci/vars.yml.sample` to `test/ci/vars.yml`
-* Adjust it your liking - this would be the host configuration
-* Adjust `inventory/group_vars/OSEv3/vars.yml` - this would be Origin-specific config
-* Provision instances via `ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml`
-  This would place inventory file in `test/ci/inventory/hosts` and run prerequisites and deploy.
-
-* Once the setup is complete run `ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml`

+ 0 - 45
test/ci/deprovision.yml

@@ -1,45 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: Gather ec2 facts
-      ec2_instance_facts:
-        region: "{{ aws_region }}"
-        filters:
-          tag-key: "kubernetes.io/cluster/{{ aws_cluster_id }}"
-      register: ec2
-
-    - name: Terminate instances
-      ec2:
-        instance_ids: "{{ item.instance_id }}"
-        region: "{{ aws_region }}"
-        state: absent
-        wait: no
-      with_items: "{{ ec2.instances }}"
-      when: not aws_use_auto_terminator | default(true)
-
-    - when: aws_use_auto_terminator | default(true)
-      block:
-        - name: Stop VMs
-          ec2:
-            instance_ids: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            state: stopped
-            wait: no
-          with_items: "{{ ec2.instances }}"
-          ignore_errors: true
-
-        - name: Rename VMs
-          ec2_tag:
-            resource: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            tags:
-              Name: "{{ item.tags.Name }}-terminate"
-          when: "'-terminate' not in item.tags.Name"
-          with_items: "{{ ec2.instances }}"

+ 0 - 113
test/ci/inventory/group_vars/OSEv3/vars.yml

@@ -1,113 +0,0 @@
----
-ansible_become: true
-ansible_become_sudo: true
-
-openshift_deployment_type: origin
-openshift_repos_enable_testing: false
-
-#Minimal set of services
-openshift_web_console_install: true
-openshift_console_install: true
-openshift_metrics_install_metrics: false
-openshift_metrics_install_logging: false
-openshift_logging_install_logging: false
-openshift_management_install_management: false
-template_service_broker_install: false
-ansible_service_broker_install: false
-openshift_enable_service_catalog: false
-osm_use_cockpit: false
-openshift_monitoring_deploy: false
-openshift_metering_install: false
-openshift_metrics_server_install: false
-openshift_monitor_availability_install: false
-openshift_enable_olm: false
-openshift_descheduler_install: false
-openshift_node_problem_detector_install: false
-openshift_autoheal_deploy: false
-openshift_cluster_autoscaler_install: false
-
-# debugging
-debug_level: 4
-etcd_debug: true
-etcd_log_package_levels: 'auth=INFO,etcdmain=DEBUG,etcdserver=DEBUG'
-openshift_docker_options: "--log-driver=journald"
-
-#Disable journald persistence
-journald_vars_to_replace:
-  - { var: Storage, val: volatile }
-  - { var: Compress, val: no }
-  - { var: SyncIntervalSec, val: 1s }
-  - { var: RateLimitInterval, val: 1s }
-  - { var: RateLimitBurst, val: 10000 }
-  - { var: SystemMaxUse, val: 8G }
-  - { var: SystemKeepFree, val: 20% }
-  - { var: SystemMaxFileSize, val: 10M }
-  - { var: MaxRetentionSec, val: 1month }
-  - { var: MaxFileSec, val: 1day }
-  - { var: ForwardToSyslog, val: no }
-  - { var: ForwardToWall, val: no }
-
-#Other settings
-openshift_enable_origin_repo: false
-osm_default_node_selector: "node-role.kubernetes.io/compute=true"
-openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
-openshift_logging_es_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-openshift_logging_es_ops_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-osm_controller_args:
-  enable-hostpath-provisioner:
-    - "true"
-openshift_hosted_router_create_certificate: true
-openshift_master_audit_config:
-  enabled: true
-openshift_master_identity_providers:
-  - name: "allow_all"
-    login: "true"
-    challenge: "true"
-    kind: "AllowAllPasswordIdentityProvider"
-openshift_template_service_broker_namespaces:
-  - "openshift"
-enable_excluders: "true"
-osm_cluster_network_cidr: "10.128.0.0/14"
-openshift_portal_net: "172.30.0.0/16"
-osm_host_subnet_length: 9
-openshift_check_min_host_disk_gb: 1.5
-openshift_check_min_host_memory_gb: 1.9
-openshift_disable_check: package_update,package_availability,memory_availability,disk_availability
-
-openshift_logging_use_mux: false
-openshift_logging_use_ops: true
-openshift_logging_es_log_appenders:
-  - "console"
-openshift_logging_fluentd_journal_read_from_head: false
-openshift_logging_fluentd_audit_container_engine: true
-
-openshift_logging_curator_cpu_request: "100m"
-openshift_logging_curator_memory_limit: "32Mi"
-openshift_logging_curator_ops_cpu_request: "100m"
-openshift_logging_curator_ops_memory_limit: "32Mi"
-openshift_logging_elasticsearch_proxy_cpu_request: "100m"
-openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
-openshift_logging_es_cpu_request: "400m"
-openshift_logging_es_memory_limit: "4Gi"
-openshift_logging_es_ops_cpu_request: "400m"
-openshift_logging_es_ops_memory_limit: "4Gi"
-openshift_logging_eventrouter_cpu_request: "100m"
-openshift_logging_eventrouter_memory_limit: "64Mi"
-openshift_logging_fluentd_cpu_request: "100m"
-openshift_logging_fluentd_memory_limit: "256Mi"
-openshift_logging_kibana_cpu_request: "100m"
-openshift_logging_kibana_memory_limit: "128Mi"
-openshift_logging_kibana_ops_cpu_request: "100m"
-openshift_logging_kibana_ops_memory_limit: "128Mi"
-openshift_logging_kibana_ops_proxy_cpu_request: "100m"
-openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
-openshift_logging_kibana_proxy_cpu_request: "100m"
-openshift_logging_kibana_proxy_memory_limit: "64Mi"
-openshift_logging_mux_cpu_request: "400m"
-openshift_logging_mux_memory_limit: "256Mi"
-
-openshift_master_cluster_method: native
-
-openshift_node_port_range: '30000-32000'

+ 0 - 112
test/ci/launch.yml

@@ -1,112 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: list available AMIs
-      ec2_ami_facts:
-        region: "{{ aws_region }}"
-        filters: "{{ aws_ami_tags }}"
-      register: ami_facts
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: Create EC2 instance
-      ec2:
-        region: "{{ aws_region }}"
-        key_name: "{{ aws_key }}"
-        instance_type: "{{ item.aws_flavor }}"
-        image: "{{ item.aws_image | default(aws_image) }}"
-        wait: yes
-        group: "{{ item.aws_security_group }}"
-        count: 1
-        vpc_subnet_id: "{{ aws_subnet }}"
-        assign_public_ip: yes
-        instance_tags: "{{ aws_instance_tags }}"
-        volumes: "{{ item.aws_volumes | default(omit) }}"
-      register: ec2
-      with_items: "{{ aws_instances }}"
-      vars:
-        aws_instance_tags: |
-          {
-            "kubernetes.io/cluster/{{ aws_cluster_id }}": "true",
-            "Name": "{{ item.name }}",
-            "ansible-groups": "{{ item.ansible_groups | join(',') }}",
-            "ansible-node-group": "{{ item.node_group }}",
-            "expirationDate": "{{ item.aws_expiration_date | default(aws_expiration_date) }}"
-          }
-
-    - name: Add machine to inventory
-      add_host:
-        name: "{{ item.instances.0.tags['Name'] }}"
-        ansible_host: "{{ item.instances.0.dns_name }}"
-        ansible_user: "{{ item.instances.0.aws_user | default(aws_user)}}"
-        groups: "{{ item.instances.0.tags['ansible-groups'].split(',') }}"
-        aws_region: "{{ aws_region }}"
-        aws_ip: "{{ item.instances.0.public_ip }}"
-        aws_id: "{{ item.instances.0.id }}"
-        openshift_node_group_name: "{{ item.instances.0.tags['ansible-node-group'] }}"
-      with_items: "{{ ec2.results }}"
-
-    - name: write the inventory
-      template:
-        src: ./template-inventory.j2
-        dest: "inventory/hosts"
-
-    - name: Refresh inventory to ensure new instances exist in inventory
-      meta: refresh_inventory
-
-- hosts: all
-  gather_facts: no
-  become: true
-  tasks:
-    - wait_for_connection: {}
-    - name: Make sure hostname is set to public ansible host
-      hostname:
-        name: "{{ ansible_host }}"
-    - name: Detecting Operating System
-      shell: ls /run/ostree-booted
-      ignore_errors: yes
-      failed_when: false
-      register: ostree_output
-    - name: Update all packages
-      package:
-        name: '*'
-        state: latest
-      when: ostree_output.rc != 0
-      register: yum_update
-    - name: Update Atomic system
-      command: atomic host upgrade
-      when: ostree_output.rc == 0
-      register: ostree_update
-    - name: Reboot machines
-      shell: sleep 5 && systemctl reboot
-      async: 1
-      poll: 0
-      ignore_errors: true
-      when: yum_update | changed or ostree_update | changed
-    - name: Wait for connection
-      wait_for_connection:
-        connect_timeout: 20
-        sleep: 5
-        delay: 5
-        timeout: 300
-    - setup: {}
-
-- import_playbook: ../../playbooks/openshift-node/network_manager.yml
-- import_playbook: ../../playbooks/prerequisites.yml
-- import_playbook: ../../playbooks/deploy_cluster.yml

+ 0 - 26
test/ci/template-inventory.j2

@@ -1,26 +0,0 @@
-[OSEv3:vars]
-ansible_python_interpreter="{{ python }}"
-ansible_user="{{ aws_user }}"
-aws_region="{{ aws_region }}"
-openshift_master_default_subdomain="{{ hostvars[groups[('lb' in groups) | ternary('lb', 'masters')][0]]["aws_ip"] }}.xip.io"
-
-[OSEv3:children]
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-{{group}}
-{% endif %}
-{% endfor %}
-
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-[{{group}}]
-{% for entry in groups[group] %}
-{% set addon_opts = "" %}
-{% if group == "nodes" %}
-{% set addon_opts = addon_opts + " openshift_node_group_name='" + hostvars[entry]['openshift_node_group_name'] + "'" %}
-{% endif %}
-{{ entry }} ansible_host='{{ hostvars[entry]['ansible_host'] }}' aws_id='{{ hostvars[entry]['aws_id'] }}' {{ addon_opts }}
-{% endfor %}
-{% endif %}
-
-{% endfor %}

+ 0 - 46
test/ci/vars.yml.sample

@@ -1,46 +0,0 @@
----
-vm_prefix: "ci_test"
-#aws_use_auto_terminator is set to True by default, as rh-dev account doesn't have permission
-# to terminate instances. These should be stopped and renamed to include 'terminate' instead
-#aws_use_auto_terminator: false
-
-type: aws
-aws_user: "ec2-user"
-python: "/usr/bin/python"
-
-aws_key: "libra"
-aws_region: "us-east-1"
-aws_cluster_id: "ci"
-# us-east-1d
-aws_subnet: "subnet-cf57c596"
-
-aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
-
-aws_ami_tags:
-  "tag:operating_system": "rhel"
-  "tag:image_stage": "base"
-  "tag:ready": "yes"
-
-aws_instances:
-- name: "{{ vm_prefix }}-master"
-  ansible_groups:
-    - masters
-    - etcd
-    - nodes
-  aws_flavor: t2.large
-  aws_security_group: public
-  node_group: "node-config-all-in-one"
-  # Use custom AMI tags
-  # aws_ami_tags:
-  #   operating_system: "rhel"
-  #   image_stage: "base"
-  #   ready: "yes"
-  # Use custom AMI
-  #aws_image: "ami-70e8fd66"
-  # Attach custom volumes
-  #aws_volumes:
-  # - device_name: /dev/sdb
-  #   volume_size: 50
-  #   delete_on_termination: yes
-  #Set expiration date for instances on CI namespace
-  #aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"

+ 5 - 13
playbooks/gcp/openshift-cluster/build_image.yml

@@ -24,10 +24,6 @@
   connection: local
   gather_facts: no
   tasks:
-  - name: Set facts
-    set_fact:
-      openshift_master_unsupported_embedded_etcd: True
-
   - name: Create the image instance disk
     gce_pd:
       service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
@@ -79,16 +75,12 @@
 
 - name: Add custom repositories
   hosts: nodes
-  handlers:
-  - import_tasks: ../../roles/openshift_repos/handlers/main.yml
-  tasks:
-  - include_role:
-      name: openshift_gcp
-      tasks_from: add_custom_repositories.yml
+  roles:
+  - role: openshift_repos
 
-# This is the part that installs all of the software and configs for the instance
-# to become a node.
-- import_playbook: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../playbooks/init/base_packages.yml
+  vars:
+    l_base_packages_hosts: nodes
 
 # Add additional GCP specific behavior
 - hosts: nodes

+ 1 - 2
playbooks/gcp/openshift-cluster/deprovision.yml

@@ -6,5 +6,4 @@
   tasks:
   - include_role:
       name: openshift_gcp
-    vars:
-      state: absent
+      tasks_from: deprovision.yml

+ 29 - 0
test/gcp/install.yml

@@ -0,0 +1,29 @@
+# This playbook installs onto a provisioned cluster
+#TODO: split into parts: nodes.yml, bootstrap.yml, masters.yml, workers.yml, bootkube/post_setup.yml
+---
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: place all scale groups into Ansible groups
+    include_role:
+      name: openshift_gcp
+      tasks_from: setup_scale_group_facts.yml
+
+- hosts: nodes
+  tasks:
+  - name: Disable google hostname updater
+    file:
+      path: /etc/dhcp/dhclient.d/google_hostname.sh
+      mode: 0644
+
+- name: run the deploy_cluster_40
+  import_playbook: ../../playbooks/deploy_cluster_40.yml
+
+- name: destroy bootstrap node
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Scale down bootstrap node and update public API DNS record
+    include_role:
+      name: openshift_gcp
+      tasks_from: remove_bootstrap.yml
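
The install playbook expects an already-provisioned cluster: it builds the Ansible groups from the scale groups, disables the GCE hostname updater on the nodes, runs the 4.0 deploy, and finally scales down the bootstrap node. A wrapper chaining provisioning and installation could look like the sketch below; test/gcp/launch.yml may already serve this purpose, and the sketch is not taken from it:

# test/gcp/ wrapper sketch
- import_playbook: provision.yml
- import_playbook: install.yml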

playbooks/gcp/openshift-cluster/inventory.yml → test/gcp/inventory.yml


playbooks/gcp/openshift-cluster/launch.yml → test/gcp/launch.yml


playbooks/gcp/openshift-cluster/provision.yml → test/gcp/provision.yml


+ 1 - 0
test/gcp/roles

@@ -0,0 +1 @@
+../../roles

playbooks/gcp/openshift-cluster/upgrade.yml → test/gcp/upgrade.yml