
Merge pull request #10695 from vrutkovs/devel-40-ci-test

Devel 4.0: CI test
Scott Dodson 6 years ago
Parent
Commit
09ac753604
86 changed files with 3749 additions and 1613 deletions
  1. 2 2
      images/installer/Dockerfile
  2. 0 6
      images/installer/origin-extra-root/etc/yum.repos.d/centos-ansible26.repo
  3. 1 1
      images/installer/root/usr/local/bin/entrypoint-provider
  4. 2 0
      inventory/dynamic/gcp/ansible.cfg
  5. 4 1
      inventory/dynamic/gcp/group_vars/all/00_defaults.yml
  6. 1 1
      inventory/dynamic/gcp/hosts.sh
  7. 92 0
      playbooks/bootkube.yml
  8. 29 0
      playbooks/bootkube_node.yml
  9. 174 0
      playbooks/deploy_cluster_40.yml
  10. 0 14
      playbooks/gcp/OWNERS
  11. 0 186
      playbooks/gcp/openshift-cluster/build_base_image.yml
  12. 0 36
      playbooks/gcp/openshift-cluster/install.yml
  13. 0 12
      playbooks/gcp/openshift-cluster/install_gcp.yml
  14. 0 23
      playbooks/gcp/openshift-cluster/openshift_node_group.yml
  15. 0 9
      playbooks/gcp/openshift-cluster/publish_image.yml
  16. 0 1
      playbooks/gcp/openshift-cluster/roles
  17. 1 1
      playbooks/init/base_packages.yml
  18. 1 60
      playbooks/init/basic_facts.yml
  19. 0 5
      playbooks/init/main.yml
  20. 1 17
      playbooks/init/version.yml
  21. 12 8
      playbooks/openshift-node/scaleup.yml
  22. 0 3
      playbooks/prerequisites.yml
  23. 1 1
      requirements.txt
  24. 5 0
      roles/container_runtime/tasks/crio_firewall.yml
  25. 0 2
      roles/container_runtime/tasks/docker_storage_setup_overlay.yml
  26. 1 1
      roles/container_runtime/tasks/package_crio.yml
  27. 0 164
      roles/container_runtime/tasks/package_docker.yml
  28. 1 9
      roles/container_runtime/templates/crio-network.j2
  29. 1 1
      roles/container_runtime/templates/crio.conf.j2
  30. 83 0
      roles/lib_utils/action_plugins/parse_ignition.py
  31. 1072 0
      roles/lib_utils/test/test_data/bootstrap.ign.json
  32. 1 0
      roles/lib_utils/test/test_data/bs.ign.json
  33. 700 0
      roles/lib_utils/test/test_data/bs2.ign.json
  34. 88 0
      roles/lib_utils/test/test_data/example.ign.json
  35. 66 0
      roles/lib_utils/test/test_parse_ignition.py
  36. 3 3
      roles/openshift_facts/defaults/main.yml
  37. 102 12
      roles/openshift_gcp/defaults/main.yml
  38. 0 20
      roles/openshift_gcp/tasks/add_custom_repositories.yml
  39. 0 10
      roles/openshift_gcp/tasks/configure_gcp_base_image.yml
  40. 0 40
      roles/openshift_gcp/tasks/configure_master_bootstrap.yml
  41. 6 0
      roles/openshift_gcp/tasks/configure_master_healthcheck.yml
  42. 141 0
      roles/openshift_gcp/tasks/deprovision.yml
  43. 9 2
      roles/openshift_gcp/tasks/dynamic_inventory.yml
  44. 246 36
      roles/openshift_gcp/tasks/main.yml
  45. 4 1
      roles/openshift_gcp/tasks/provision_ssh_keys.yml
  46. 0 32
      roles/openshift_gcp/tasks/publish_image.yml
  47. 93 0
      roles/openshift_gcp/tasks/remove_bootstrap.yml
  48. 15 28
      roles/openshift_gcp/tasks/setup_scale_group_facts.yml
  49. 39 0
      roles/openshift_gcp/templates/additional_settings.j2.sh
  50. 0 13
      roles/openshift_gcp/templates/dns.j2.sh
  51. 2 2
      roles/openshift_gcp/templates/master_healthcheck.j2
  52. 0 7
      roles/openshift_gcp/templates/openshift-bootstrap-update.j2
  53. 0 304
      roles/openshift_gcp/templates/provision.j2.sh
  54. 11 144
      roles/openshift_gcp/templates/remove.j2.sh
  55. 0 20
      roles/openshift_gcp/templates/yum_repo.j2
  56. 61 0
      roles/openshift_node40/README.md
  57. 160 0
      roles/openshift_node40/defaults/main.yml
  58. 24 0
      roles/openshift_node40/files/clean-up-crio-pods.sh
  59. 128 0
      roles/openshift_node40/files/networkmanager/99-origin-dns.sh
  60. 18 0
      roles/openshift_node40/files/openshift-node
  61. 20 0
      roles/openshift_node40/handlers/main.yml
  62. 17 0
      roles/openshift_node40/meta/main.yml
  63. 32 0
      roles/openshift_node40/tasks/bootkube_config.yml
  64. 49 0
      roles/openshift_node40/tasks/config.yml
  65. 16 0
      roles/openshift_node40/tasks/create_files_from_ignition.yml
  66. 15 0
      roles/openshift_node40/tasks/install.yml
  67. 15 0
      roles/openshift_node40/tasks/systemd.yml
  68. 69 0
      roles/openshift_node40/templates/bootstrap.yml.j2
  69. 20 0
      roles/openshift_node40/templates/multipath.conf.j2
  70. 26 0
      roles/openshift_node40/templates/node.service.j2
  71. 17 0
      roles/openshift_node40/templates/origin-dns.conf.j2
  72. 1 11
      roles/openshift_version/tasks/first_master.yml
  73. 0 7
      test/ci/README.md
  74. 0 45
      test/ci/deprovision.yml
  75. 0 113
      test/ci/inventory/group_vars/OSEv3/vars.yml
  76. 0 112
      test/ci/launch.yml
  77. 0 26
      test/ci/template-inventory.j2
  78. 0 46
      test/ci/vars.yml.sample
  79. 20 13
      playbooks/gcp/openshift-cluster/build_image.yml
  80. 1 2
      playbooks/gcp/openshift-cluster/deprovision.yml
  81. 29 0
      test/gcp/install.yml
  82. 0 0
      test/gcp/inventory.yml
  83. 0 0
      test/gcp/launch.yml
  84. 0 0
      test/gcp/provision.yml
  85. 1 0
      test/gcp/roles
  86. 0 0
      test/gcp/upgrade.yml

+ 2 - 2
images/installer/Dockerfile

@@ -10,13 +10,13 @@ COPY images/installer/origin-extra-root /
 # install ansible and deps
 RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl python2-passlib httpd-tools openssh-clients origin-clients iproute patch" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible-2.6.5 python2-boto python2-boto3 python2-crypto which python2-pip.noarch python2-scandir python2-packaging azure-cli-2.0.46" \
+ && EPEL_PKGS="ansible python2-boto python2-crypto which python2-pip.noarch python2-scandir python2-packaging azure-cli-2.0.46" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
  && if [ "$(uname -m)" == "x86_64" ]; then yum install -y https://sdodson.fedorapeople.org/google-cloud-sdk-183.0.0-3.el7.x86_64.rpm ; fi \
  && yum install -y java-1.8.0-openjdk-headless \
  && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
- && pip install 'apache-libcloud~=2.2.1' 'SecretStorage<3' 'ansible[azure]' \
+ && pip install 'apache-libcloud~=2.2.1' 'SecretStorage<3' 'ansible[azure]' 'google-auth' 'boto3==1.4.6' \
  && yum clean all
 
 LABEL name="openshift/origin-ansible" \

+ 0 - 6
images/installer/origin-extra-root/etc/yum.repos.d/centos-ansible26.repo

@@ -1,6 +0,0 @@
-
-[centos-ansible26-testing]
-name=CentOS Ansible 2.6 testing repo
-baseurl=https://cbs.centos.org/repos/configmanagement7-ansible-26-testing/x86_64/os/
-enabled=1
-gpgcheck=0

+ 1 - 1
images/installer/root/usr/local/bin/entrypoint-provider

@@ -45,7 +45,7 @@ if [[ -f "${FILES}/ssh-privatekey" ]]; then
   else
     keyfile="${HOME}/.ssh/id_rsa"
   fi
-  mkdir "${HOME}/.ssh"
+  mkdir -p "${HOME}/.ssh"
   rm -f "${keyfile}"
   cat "${FILES}/ssh-privatekey" > "${keyfile}"
   chmod 0600 "${keyfile}"

+ 2 - 0
inventory/dynamic/gcp/ansible.cfg

@@ -28,6 +28,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
 # work around privilege escalation timeouts in ansible:
 timeout = 30
 
+stdout_callback = yaml
+
 # Uncomment to use the provided example inventory
 inventory = hosts.sh
 

+ 4 - 1
inventory/dynamic/gcp/group_vars/all/00_defaults.yml

@@ -20,6 +20,9 @@ openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_z
 openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
 openshift_master_default_subdomain: "{{ wildcard_zone }}"
 
+mcd_port: 49500
+mcd_endpoint: "{{ openshift_master_cluster_public_hostname }}:{{ mcd_port }}"
+
 # Cloud specific settings
 openshift_cloudprovider_kind: gce
 openshift_hosted_registry_storage_provider: gcs
@@ -31,7 +34,7 @@ openshift_master_identity_providers:
 openshift_node_port_range: 30000-32000
 openshift_node_open_ports: [{"service":"Router stats port", "port":"1936/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/udp"}]
 os_sdn_network_plugin_name: redhat/openshift-ovs-networkpolicy
-openshift_node_sdn_mtu: 1410
+openshift_node_sdn_mtu: 1500
 osm_cluster_network_cidr: 172.16.0.0/16
 osm_host_subnet_length: 9
 openshift_portal_net: 172.30.0.0/16
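The new mcd_endpoint value is what deploy_cluster_40.yml later uses to reach the Machine Config Server; a minimal sketch of the expansion, assuming an illustrative public_hosted_zone of example.com (not a value from this PR):

# With public_hosted_zone: example.com, mcd_endpoint renders as
# "openshift-master.example.com:49500" and is consumed like this:
openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/master"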

+ 1 - 1
inventory/dynamic/gcp/hosts.sh

@@ -5,7 +5,7 @@ set -euo pipefail
 # Use a playbook to calculate the inventory dynamically from
 # the provided cluster variables.
 src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml 2>&1 )"; then
+if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../test/gcp/inventory.yml 2>&1 )"; then
   echo "error: Inventory configuration failed" 1>&2
   echo "$out" 1>&2
   echo "{}"

+ 92 - 0
playbooks/bootkube.yml

@@ -0,0 +1,92 @@
+---
+# Generate config using openshift-installer, set Base Domain to testing.tt
+# Add bootstrap host in [bootstrap] group and set ignition_file
+# Add master host to [masters] group
+# Add worker hosts in [workers] group
+# Make sure bootstrap has <clusterid>-api.<dns base> name
+# Make sure masters have <clusterid>-etcd-<index>.<dns base> name
+
+# FIXME: use dnsmasq to fake DNS entries
+
+- import_playbook: init/main.yml
+  vars:
+    l_install_base_packages: True
+    l_repo_hosts: "all:!all"
+
+# TODO: proper firewalld setup
+# 49500 on bootstrap; 2379, 6443, 10250 on masters, 10250 on workers
+
+- import_playbook: container-runtime/private/setup_storage.yml
+
+- import_playbook: container-runtime/private/config.yml
+
+- name: install nodes
+  hosts: nodes
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+
+- name: Config bootstrap node
+  hosts: bootstrap
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Start masters
+  hosts: masters
+  tasks:
+  # TODO Read this from master's ignition file
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ bootstrap }}:49500/config/master?etcd_index={{ index }}"
+    vars:
+      bootstrap: "{{ hostvars[groups['bootstrap'][0]]['ansible_host'] }}"
+      index: "{{ groups['masters'].index(inventory_hostname) }}"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - name: Make sure etcd user exists
+    user:
+      name: etcd
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Start workers
+  hosts: workers
+  tasks:
+  # TODO Read this from master's ignition file
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ bootstrap }}:49500/config/worker"
+    vars:
+      bootstrap: "{{ hostvars[groups['bootstrap'][0]]['ansible_host'] }}"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
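The comments at the top of this playbook describe the inventory it expects; a hypothetical YAML inventory sketch matching that layout (the cluster id, host names, and ignition_file path are assumptions, not part of the change):

all:
  children:
    bootstrap:
      hosts:
        testcluster-api.testing.tt:
          ignition_file: /tmp/bootstrap.ign   # assumed path
    masters:
      hosts:
        testcluster-etcd-0.testing.tt: {}
    workers:
      hosts:
        testcluster-worker-0.testing.tt: {}
    nodes:
      children:
        bootstrap: {}
        masters: {}
        workers: {}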

+ 29 - 0
playbooks/bootkube_node.yml

@@ -0,0 +1,29 @@
+---
+# l_scale_up_hosts may be passed in via various scaleup plays.
+
+- import_playbook: init/main.yml
+  vars:
+    l_install_base_packages: True
+    l_repo_hosts: "all:!all"
+
+# This is required for container runtime for crio, only needs to run once.
+- name: Configure os_firewall
+  hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"
+  vars:
+    l_default_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config"
+  roles:
+  - role: os_firewall
+
+- import_playbook: container-runtime/private/setup_storage.yml
+
+- import_playbook: container-runtime/private/config.yml
+
+- name: install nodes
+  hosts: bootkube_nodes
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: bootkube_config.yml

+ 174 - 0
playbooks/deploy_cluster_40.yml

@@ -0,0 +1,174 @@
+---
+- name: run the init
+  import_playbook: init/main.yml
+  vars:
+    l_init_fact_hosts: "nodes"
+    l_openshift_version_set_hosts: "nodes"
+    l_install_base_packages: True
+    l_repo_hosts: "all:!all"
+
+# TODO(michaelgugino): break up the rest of this file into reusable chunks.
+- name: Install nodes
+  hosts: nodes
+  roles:
+  - role: container_runtime
+  tasks:
+  - import_role:
+      name: container_runtime
+      tasks_from: docker_storage_setup_overlay.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: extra_storage_setup.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: package_crio.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+
+- name: Config bootstrap node
+  hosts: bootstrap
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+    vars:
+      excluded_services:
+      - progress.service
+
+- name: Start masters
+  hosts: masters
+  tasks:
+  # This is required for openshift_node40/config.yml
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/master"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - name: Make sure etcd user exists
+    user:
+      name: etcd
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Start workers
+  hosts: workers
+  tasks:
+  # This is required for openshift_node40/config.yml
+  - set_fact:
+      openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/worker"
+  - name: Wait for bootstrap endpoint to show up
+    uri:
+      url: "{{ openshift_bootstrap_endpoint }}"
+      validate_certs: false
+    delay: 10
+    retries: 60
+    register: result
+    until:
+    - "'status' in result"
+    - result.status == 200
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml
+
+- name: Wait for nodes to become ready
+  hosts: bootstrap
+  tasks:
+  - name: Wait for temporary control plane to show up
+    oc_obj:
+      state: list
+      kind: pod
+      namespace: kube-system
+      kubeconfig: /opt/openshift/auth/kubeconfig
+    register: control_plane_pods
+    retries: 60
+    delay: 10
+    until:
+    - "'results' in control_plane_pods and 'results' in control_plane_pods.results"
+    - control_plane_pods.results.results[0]['items'] | length > 0
+  - name: Wait for master nodes to show up
+    oc_obj:
+      state: list
+      kind: node
+      selector: "node-role.kubernetes.io/master"
+      kubeconfig: /opt/openshift/auth/kubeconfig
+    register: master_nodes
+    retries: 60
+    delay: 10
+    until:
+    - "'results' in master_nodes and 'results' in master_nodes.results"
+    - master_nodes.results.results[0]['items'] | length > 0
+  - name: Wait for bootkube service to finish
+    service_facts: {}
+    # 10 mins for the temporary control plane to complete
+    retries: 120
+    delay: 5
+    until: "'bootkube.service' not in ansible_facts.services"
+    ignore_errors: true
+  - name: Fetch kubeconfig for test container
+    fetch:
+      src: /opt/openshift/auth/kubeconfig
+      dest: /tmp/artifacts/installer/auth/kubeconfig
+      flat: yes
+
+  - name: Wait for core operators to appear and complete
+    oc_obj:
+      state: list
+      kind: ClusterOperator
+      name: "{{ item }}"
+      kubeconfig: /opt/openshift/auth/kubeconfig
+    register: operator
+    #Give each operator 5 mins to come up
+    retries: 60
+    delay: 5
+    until:
+    - "'results' in operator"
+    - "'results' in operator.results"
+    - operator.results.results | length > 0
+    - "'status' in operator.results.results[0]"
+    - "'conditions' in operator.results.results[0]['status']"
+    - operator.results.results[0].status.conditions | selectattr('type', 'match', '^Available$') | map(attribute='status') | join | bool == True
+    - operator.results.results[0].status.conditions | selectattr('type', 'match', '^Progressing$') | map(attribute='status') | join | bool == False
+    - operator.results.results[0].status.conditions | selectattr('type', 'match', '^Failing$') | map(attribute='status') | join | bool == False
+    with_items:
+    - machine-config-operator
+    # Fails often with 'x of y nodes are not at revision n'
+    #- openshift-cluster-kube-apiserver-operator
+    # Failing with 'ConfigObservationFailing: configmap/cluster-config-v1.kube-system: no recognized cloud provider platform found' - https://github.com/openshift/cluster-kube-controller-manager-operator/issues/100
+    #- openshift-cluster-kube-controller-manager-operator
+    # Fails often with 'x of y nodes are not at revision n'
+    #- openshift-cluster-kube-scheduler-operator
+    #- openshift-cluster-openshift-apiserver-operator
+    - openshift-cluster-openshift-controller-manager-operator
+    - openshift-ingress-operator
+    ignore_errors: true
+
+  - block:
+    - name: Output the operators status
+      oc_obj:
+        state: list
+        kind: ClusterOperator
+        selector: ""
+        kubeconfig: /opt/openshift/auth/kubeconfig
+    - fail:
+        msg: Required operators didn't complete the install
+    when: operator.failed
+
+  - pause: {}
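The operator wait above only passes once every listed ClusterOperator reports Available=True, Progressing=False and Failing=False; an illustrative status fragment (not taken from the PR) that would satisfy those conditions:

status:
  conditions:
  - type: Available
    status: "True"
  - type: Progressing
    status: "False"
  - type: Failing
    status: "False"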

+ 0 - 14
playbooks/gcp/OWNERS

@@ -1,14 +0,0 @@
-# approval == this is a good idea /approve
-approvers:
-  - smarterclayton
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs
-# review == this code is good /lgtm
-reviewers:
-  - smarterclayton
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs

+ 0 - 186
playbooks/gcp/openshift-cluster/build_base_image.yml

@@ -1,186 +0,0 @@
----
-# This playbook ensures that a base image is up to date with all of the required settings
-- name: Verify prerequisites for image build
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - name: Require openshift_gcp_root_image
-    fail:
-      msg: "A root OS image name or family is required for base image building.  Please ensure `openshift_gcp_root_image` is defined."
-    when: openshift_gcp_root_image is undefined
-
-- name: Provision ssh key
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - name: Set up core host GCP configuration
-    import_role:
-      name: openshift_gcp
-      tasks_from: provision_ssh_keys.yml
-
-- name: Launch image build instance
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - name: Create the image instance disk
-    gce_pd:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      name: "{{ openshift_gcp_prefix }}build-image-instance"
-      disk_type: pd-ssd
-      image: "{{ openshift_gcp_root_image }}"
-      size_gb: 10
-      state: present
-
-  - name: Launch the image build instance
-    gce:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      machine_type: n1-standard-1
-      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
-      state: present
-      tags:
-      - build-image-instance
-      disk_auto_delete: false
-      disks:
-      - "{{ openshift_gcp_prefix }}build-image-instance"
-    register: gce
-
-  - add_host:
-      hostname: "{{ item.public_ip }}"
-      groupname: build_instance_ips
-    with_items: "{{ gce.instance_data }}"
-
-  - name: Wait for instance to respond to SSH
-    wait_for:
-      delay: 1
-      host: "{{ item.public_ip }}"
-      port: 22
-      state: started
-      timeout: 120
-    with_items: "{{ gce.instance_data }}"
-
-- name: Prepare instance content sources
-  pre_tasks:
-  - set_fact:
-      allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
-  - set_fact:
-      using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
-  hosts: build_instance_ips
-  roles:
-  - role: rhel_subscribe
-    when: using_rhel_subscriptions
-  - role: openshift_repos
-    vars:
-      openshift_additional_repos: []
-  post_tasks:
-  - name: Add custom repositories
-    include_role:
-      name: openshift_gcp
-      tasks_from: add_custom_repositories.yml
-  - name: Add the Google Cloud repo
-    yum_repository:
-      name: google-cloud
-      description: Google Cloud Compute
-      baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
-      gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-      gpgcheck: yes
-      repo_gpgcheck: yes
-      state: present
-    when: ansible_os_family == "RedHat"
-  - name: Add the jdetiber-qemu-user-static copr repo
-    yum_repository:
-      name: jdetiber-qemu-user-static
-      description: QEMU user static COPR
-      baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
-      gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
-      gpgcheck: yes
-      repo_gpgcheck: no
-      state: present
-    when: ansible_os_family == "RedHat"
-  - name: Accept GPG keys for the repos
-    command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
-  - name: Install qemu-user-static
-    package:
-      name: qemu-user-static
-      state: present
-  - name: Disable yum-cron service (installed by Google Cloud by default)
-    systemd:
-      name: yum-cron
-      state: stopped
-      enabled: no
-  - name: Start and enable systemd-binfmt service
-    systemd:
-      name: systemd-binfmt
-      state: started
-      enabled: yes
-
-- name: Build image
-  hosts: build_instance_ips
-  pre_tasks:
-  - name: Set up core host GCP configuration
-    include_role:
-      name: openshift_gcp
-      tasks_from: configure_gcp_base_image.yml
-  roles:
-  - role: os_update_latest
-  post_tasks:
-  - name: Disable all repos on RHEL
-    command: subscription-manager repos --disable="*"
-    when: using_rhel_subscriptions
-  - name: Enable repos for packages on RHEL
-    command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
-    when: using_rhel_subscriptions
-  - name: Install common image prerequisites
-    package:
-      name: "{{ pkg_list | join(',') }}"
-      state: latest
-    vars:
-      pkg_list:
-      # required by Ansible
-      - PyYAML
-      - google-compute-engine
-      - google-compute-engine-init
-      - google-config
-      - wget
-      - git
-      - net-tools
-      - bind-utils
-      - iptables-services
-      - bridge-utils
-      - bash-completion
-  - name: Clean yum metadata
-    command: yum clean all
-    args:
-      warn: no
-    when: ansible_os_family == "RedHat"
-
-- name: Commit image
-  hosts: localhost
-  connection: local
-  tasks:
-  - name: Terminate the image build instance
-    gce:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
-      state: absent
-  - name: Save the new image
-    command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
-  - name: Remove the image instance disk
-    gce_pd:
-      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
-      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
-      project_id: "{{ openshift_gcp_project }}"
-      zone: "{{ openshift_gcp_zone }}"
-      name: "{{ openshift_gcp_prefix }}build-image-instance"
-      state: absent

+ 0 - 36
playbooks/gcp/openshift-cluster/install.yml

@@ -1,36 +0,0 @@
-# This playbook installs onto a provisioned cluster
----
-- hosts: localhost
-  connection: local
-  tasks:
-  - name: place all scale groups into Ansible groups
-    include_role:
-      name: openshift_gcp
-      tasks_from: setup_scale_group_facts.yml
-
-- name: run the init
-  import_playbook: ../../init/main.yml
-
-- import_playbook: ../../openshift-checks/private/install.yml
-
-- name: ensure master nodes are ready for bootstrapping
-  import_playbook: ../../openshift-node/private/bootstrap.yml
-
-- name: configure the control plane
-  import_playbook: ../../common/private/control_plane.yml
-
-- name: run the GCP specific post steps
-  import_playbook: install_gcp.yml
-
-- name: install components
-  import_playbook: ../../common/private/components.yml
-
-- name: Copy the kubeconfig, used by CI to determine when the containers are ready
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - name: Retrieve cluster configuration
-    fetch:
-      src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
-      dest: "/tmp/"
-      flat: yes

+ 0 - 12
playbooks/gcp/openshift-cluster/install_gcp.yml

@@ -1,12 +0,0 @@
----
-- hosts: masters
-  gather_facts: no
-  tasks:
-  - name: create master health check service
-    include_role:
-      name: openshift_gcp
-      tasks_from: configure_master_healthcheck.yml
-  - name: configure master bootstrap distribution
-    include_role:
-      name: openshift_gcp
-      tasks_from: configure_master_bootstrap.yml

+ 0 - 23
playbooks/gcp/openshift-cluster/openshift_node_group.yml

@@ -1,23 +0,0 @@
-# This playbook installs onto a provisioned cluster
----
-- hosts: localhost
-  connection: local
-  tasks:
-  - name: place all scale groups into Ansible groups
-    include_role:
-      name: openshift_gcp
-      tasks_from: setup_scale_group_facts.yml
-    vars:
-      all_nodes: true
-
-- import_playbook: ../../init/main.yml
-  vars:
-    l_init_fact_hosts: "oo_masters_to_config"
-    l_openshift_version_set_hosts: "all:!all"
-    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
-
-- name: Setup node-group configmaps
-  hosts: oo_first_master
-  tasks:
-  - import_role:
-      name: openshift_node_group

+ 0 - 9
playbooks/gcp/openshift-cluster/publish_image.yml

@@ -1,9 +0,0 @@
----
-- name: Publish the most recent image
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-  - import_role:
-      name: openshift_gcp
-      tasks_from: publish_image.yml

+ 0 - 1
playbooks/gcp/openshift-cluster/roles

@@ -1 +0,0 @@
-../../../roles

+ 1 - 1
playbooks/init/base_packages.yml

@@ -36,7 +36,7 @@
       - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
       - libsemanage-python
       - yum-utils
-      - "{{ 'python3-docker' if ansible_distribution == 'Fedora' else 'python-docker' }}"
+      - "{{ 'python3-docker' if ansible_distribution == 'Fedora' else 'python-docker-py' }}"
       pkg_list_non_fedora:
       - 'python-ipaddress'
       pkg_list_use_non_fedora: "{{ ansible_distribution != 'Fedora' | bool }}"

+ 1 - 60
playbooks/init/basic_facts.yml

@@ -25,66 +25,7 @@
   - name: set openshift_deployment_type if unset
     set_fact:
       openshift_deployment_type: "{{ deployment_type }}"
+      openshift_is_atomic: False
     when:
     - openshift_deployment_type is undefined
     - deployment_type is defined
-
-- name: Retrieve existing master configs and validate
-  hosts: oo_masters_to_config
-  gather_facts: no
-  any_errors_fatal: true
-  roles:
-  - openshift_facts
-  tasks:
-  - import_role:
-      name: openshift_control_plane
-      tasks_from: check_existing_config.yml
-
-  - when:
-    - l_existing_config_master_config is defined
-    - l_existing_config_master_config.networkConfig is defined
-    block:
-    - set_fact:
-        openshift_portal_net: "{{ l_existing_config_master_config.networkConfig.serviceNetworkCIDR }}"
-
-    - set_fact:
-        osm_cluster_network_cidr: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].cidr }}"
-        osm_host_subnet_length: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].hostSubnetLength }}"
-      when:
-      - l_existing_config_master_config.networkConfig.clusterNetworks is defined
-      # End block
-
-- name: Initialize special first-master variables
-  hosts: oo_first_master
-  roles:
-  - role: openshift_facts
-  tasks:
-  - when: not (osm_default_node_selector is defined)
-    block:
-    - set_fact:
-        # l_existing_config_master_config is set in openshift_control_plane/tasks/check_existing_config.yml
-        openshift_master_config_node_selector: "{{ l_existing_config_master_config.projectConfig.defaultNodeSelector }}"
-      when:
-      - l_existing_config_master_config is defined
-      - l_existing_config_master_config.projectConfig is defined
-      - l_existing_config_master_config.projectConfig.defaultNodeSelector is defined
-      - l_existing_config_master_config.projectConfig.defaultNodeSelector != ''
-
-  - set_fact:
-      # We need to setup openshift_client_binary here for special uses of delegate_to in
-      # later roles and plays.
-      first_master_client_binary: "{{  openshift_client_binary }}"
-      #Some roles may require this to be set for first master
-      openshift_client_binary: "{{ openshift_client_binary }}"
-      # we need to know if a default node selector has been manually set outside the installer
-      l_osm_default_node_selector: '{{ osm_default_node_selector | default(openshift_master_config_node_selector) | default("node-role.kubernetes.io/compute=true") }}'
-
-- name: Disable web console if required
-  hosts: oo_masters_to_config
-  gather_facts: no
-  tasks:
-  - set_fact:
-      openshift_web_console_install: False
-    when:
-    - openshift_deployment_subtype is defined
-    - openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )

+ 0 - 5
playbooks/init/main.yml

@@ -30,13 +30,8 @@
 - import_playbook: base_packages.yml
   when: l_install_base_packages | default(False) | bool
 
-- import_playbook: cluster_facts.yml
-
 - import_playbook: version.yml
 
-- import_playbook: sanity_checks.yml
-  when: not (skip_sanity_checks | default(False))
-
 - name: Initialization Checkpoint End
   hosts: all
   gather_facts: false

+ 1 - 17
playbooks/init/version.yml

@@ -1,23 +1,7 @@
 ---
 - name: Determine openshift_version to configure on first master
-  hosts: "{{ l_openshift_version_determine_hosts | default('oo_first_master') }}"
+  hosts: oo_nodes_to_config
   tasks:
   - include_role:
       name: openshift_version
       tasks_from: first_master.yml
-
-# NOTE: We set this even on etcd hosts as they may also later run as masters,
-# and we don't want to install wrong version of docker and have to downgrade
-# later.
-- name: Set openshift_version for etcd, node, and master hosts
-  hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}"
-  vars:
-    l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
-    l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
-    l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
-    l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
-  tasks:
-  - set_fact:
-      openshift_version: "{{ l_first_master_openshift_version }}"
-      openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}"
-      openshift_image_tag: "{{ l_first_master_openshift_image_tag }}"

+ 12 - 8
playbooks/openshift-node/scaleup.yml

@@ -31,11 +31,15 @@
     l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
     l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
 
-- import_playbook: ../init/version.yml
-  vars:
-    l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
-
-- import_playbook: private/bootstrap.yml
-- import_playbook: private/join.yml
-
-- import_playbook: ../openshift-glusterfs/private/add_hosts.yml
+- name: install nodes
+  hosts: oo_nodes_to_config
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml

+ 0 - 3
playbooks/prerequisites.yml

@@ -13,9 +13,6 @@
     l_install_base_packages: True
     l_repo_hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
 
-- import_playbook: init/validate_hostnames.yml
-  when: not (skip_validate_hostnames | default(False))
-
 # This is required for container runtime for crio, only needs to run once.
 - name: Configure os_firewall
   hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"

+ 1 - 1
requirements.txt

@@ -1,6 +1,6 @@
 # Versions are pinned to prevent pypi releases arbitrarily breaking
 # tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.6.5
+ansible==2.7.1
 boto==2.44.0
 click==6.7
 pyOpenSSL==17.5.0

+ 5 - 0
roles/container_runtime/tasks/crio_firewall.yml

@@ -1,6 +1,11 @@
 ---
 - when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool
   block:
+  - name: Make sure iptables-services is installed
+    package:
+      name: iptables-services
+      state: present
+
   - name: Add iptables allow rules
     os_firewall_manage_iptables:
       name: "{{ item.service }}"

+ 0 - 2
roles/container_runtime/tasks/docker_storage_setup_overlay.yml

@@ -6,5 +6,3 @@
     owner: root
     group: root
     mode: 0664
-  when:
-  - container_runtime_docker_storage_type == 'overlay2'

+ 1 - 1
roles/container_runtime/tasks/package_crio.yml

@@ -35,8 +35,8 @@
     pkg_list:
       - cri-o
       - cri-tools
-      - atomic
       - skopeo
+      - podman
 
 - name: Remove CRI-O default configuration files
   file:

+ 0 - 164
roles/container_runtime/tasks/package_docker.yml

@@ -1,165 +1 @@
 ---
-- import_tasks: common/pre.yml
-
-- name: Get current installed Docker version
-  command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  register: curr_docker_version
-  retries: 4
-  until: curr_docker_version is succeeded
-  changed_when: false
-
-# Some basic checks to ensure the role will complete
-- import_tasks: docker_sanity.yml
-
-# Make sure Docker is installed, but does not update a running version.
-# Docker upgrades are handled by a separate playbook.
-# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
-- name: Install Docker
-  package:
-    name: "{{ pkg_list | join(',') }}"
-    state: present
-  register: result
-  until: result is succeeded
-  vars:
-    pkg_list:
-    - "docker{{ '-' + docker_version if docker_version is defined else '' }}"
-    - skopeo
-
-- block:
-  # Extend the default Docker service unit file when using iptables-services
-  - name: Ensure docker.service.d directory exists
-    file:
-      path: "{{ docker_systemd_dir }}"
-      state: directory
-
-  - name: Configure Docker service unit file
-    template:
-      dest: "{{ docker_systemd_dir }}/custom.conf"
-      src: custom.conf.j2
-    notify:
-    - restart container runtime
-  when: not (os_firewall_use_firewalld | default(False)) | bool
-
-- stat:
-    path: /etc/sysconfig/docker
-    get_checksum: false
-    get_mime: false
-  register: docker_check
-
-- name: Set registry params
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^{{ item.reg_conf_var }}=.*$'
-    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | lib_utils_oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
-  when:
-  - item.reg_fact_val != []
-  - docker_check.stat.isreg is defined
-  - docker_check.stat.isreg
-  with_items:
-  - reg_conf_var: ADD_REGISTRY
-    reg_fact_val: "{{ l2_docker_additional_registries }}"
-    reg_flag: --add-registry
-  - reg_conf_var: BLOCK_REGISTRY
-    reg_fact_val: "{{ l2_docker_blocked_registries }}"
-    reg_flag: --block-registry
-  - reg_conf_var: INSECURE_REGISTRY
-    reg_fact_val: "{{ l2_docker_insecure_registries }}"
-    reg_flag: --insecure-registry
-  notify:
-  - restart container runtime
-
-- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
-  template:
-    dest: "{{ containers_registries_conf_path }}"
-    src: registries.conf.j2
-  when: openshift_docker_use_etc_containers | bool
-  notify:
-  - restart container runtime
-
-- name: Set Proxy Settings
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^{{ item.reg_conf_var }}=.*$'
-    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
-    state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
-  with_items:
-  - reg_conf_var: HTTP_PROXY
-    reg_fact_val: "{{ docker_http_proxy }}"
-  - reg_conf_var: HTTPS_PROXY
-    reg_fact_val: "{{ docker_https_proxy }}"
-  - reg_conf_var: NO_PROXY
-    reg_fact_val: "{{ docker_no_proxy }}"
-  notify:
-  - restart container runtime
-  when:
-  - docker_check.stat.isreg is defined
-  - docker_check.stat.isreg
-  - docker_http_proxy != '' or docker_https_proxy != ''
-
-- name: Set various Docker options
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^OPTIONS=.*$'
-    line: "OPTIONS='\
-      {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
-      {% if openshift_docker_log_driver %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \
-      {% if l2_docker_log_options != [] %} {{ l2_docker_log_options |  lib_utils_oo_split() | lib_utils_oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
-      {% if (openshift_docker_hosted_registry_insecure | bool) and openshift_docker_hosted_registry_network %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \
-      {% if docker_options is defined %} {{ docker_options }}{% endif %} \
-      {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \
-      --signature-verification={{ openshift_docker_signature_verification | bool }}'"
-  when: docker_check.stat.isreg is defined and docker_check.stat.isreg
-  notify:
-  - restart container runtime
-
-- stat:
-    path: /etc/sysconfig/docker-network
-    get_checksum: false
-    get_mime: false
-  register: sysconfig_docker_network_check
-
-- name: Configure Docker Network OPTIONS
-  lineinfile:
-    dest: /etc/sysconfig/docker-network
-    regexp: '^DOCKER_NETWORK_OPTIONS=.*$'
-    line: "DOCKER_NETWORK_OPTIONS='\
-      {% if openshift.node is defined and openshift.node.sdn_mtu is defined %} --mtu={{ openshift.node.sdn_mtu }}{% endif %}'"
-  when:
-  - sysconfig_docker_network_check.stat.isreg is defined
-  - sysconfig_docker_network_check.stat.isreg
-  notify:
-  - restart container runtime
-
-# The following task is needed as the systemd module may report a change in
-# state even though docker is already running.
-- name: Detect if docker is already started
-  command: "systemctl show docker -p ActiveState"
-  changed_when: False
-  register: r_docker_already_running_result
-
-- name: Start the Docker service
-  systemd:
-    name: docker
-    enabled: yes
-    state: started
-    daemon_reload: yes
-  register: r_docker_package_docker_start_result
-  until: not (r_docker_package_docker_start_result is failed)
-  retries: 3
-  delay: 30
-
-- set_fact:
-    docker_service_status_changed: "{{ (r_docker_package_docker_start_result is changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-
-- name: Check for docker_storage_path/overlay2
-  stat:
-    path: "{{ docker_storage_path }}/overlay2"
-  register: dsp_stat
-
-- name: Fixup SELinux permissions for docker
-  shell: |
-           semanage fcontext -a -e /var/lib/docker/overlay2 "{{ docker_storage_path }}/overlay2"
-           restorecon -R -v "{{ docker_storage_path }}/overlay2"
-  when: dsp_stat.stat.exists
-
-- import_tasks: common/post.yml

+ 1 - 9
roles/container_runtime/templates/crio-network.j2

@@ -1,9 +1 @@
-{% if 'http_proxy' in openshift.common %}
-HTTP_PROXY={{ openshift.common.http_proxy }}
-{% endif %}
-{% if 'https_proxy' in openshift.common %}
-HTTPS_PROXY={{ openshift.common.https_proxy }}
-{% endif %}
-{% if 'no_proxy' in openshift.common %}
-NO_PROXY={{ openshift.common.no_proxy }}
-{% endif %}
+CRIO_NETWORK_OPTIONS="--cni-config-dir=/etc/kubernetes/cni/net.d --cni-plugin-dir=/var/lib/cni/bin"

+ 1 - 1
roles/container_runtime/templates/crio.conf.j2

@@ -152,7 +152,7 @@ image_volumes = "mkdir"
 
 # registries is used to specify a comma separated list of registries to be used
 # when pulling an unqualified image (e.g. fedora:rawhide).
-# registries = []
+registries = ['docker.io']
 
 # The "crio.network" table contains settings pertaining to the
 # management of CNI plugins.

+ 83 - 0
roles/lib_utils/action_plugins/parse_ignition.py

@@ -0,0 +1,83 @@
+"""Ansible action plugin to decode ignition payloads"""
+
+import base64
+import os
+import six
+from six.moves import urllib
+from ansible.plugins.action import ActionBase
+
+
+# pylint: disable=too-many-function-args
+def get_file_data(encoded_contents):
+    """Decode data URLs as specified in RFC 2397"""
+    # The following source is adapted from Python3 source
+    # License: https://github.com/python/cpython/blob/3.7/LICENSE
+    # retrieved from: https://github.com/python/cpython/blob/3.7/Lib/urllib/request.py
+    _, data = encoded_contents.split(":", 1)
+    mediatype, data = data.split(",", 1)
+
+    # even base64 encoded data URLs might be quoted so unquote in any case:
+    data = urllib.parse.unquote(data)
+    if mediatype.endswith(";base64"):
+        data = base64.b64decode(data).decode('utf-8')
+        mediatype = mediatype[:-7]
+    # End PSF software
+    return data
+
+
+# pylint: disable=too-many-function-args
+def get_files(files_dict, systemd_dict, dir_list, data):
+    """parse data to populate file_dict"""
+    files = data.get('storage', []).get('files', [])
+    for item in files:
+        path = item["path"]
+        dir_list.add(os.path.dirname(path))
+        # remove prefix "data:,"
+        encoded_contents = item['contents']['source']
+        contents = get_file_data(encoded_contents)
+        # convert from int to octal, padding at least to 4 places.
+        # eg, 420 becomes '0644'
+        mode = str(format(int(item["mode"]), '04o'))
+        inode = {"contents": contents, "mode": mode}
+        files_dict[path] = inode
+    # get the systemd unit files while we're here
+    systemd_units = data.get('systemd', []).get('units', [])
+    for item in systemd_units:
+        contents = item['contents']
+        if six.PY2:
+            # pylint: disable=redefined-variable-type
+            contents = contents.decode('unicode-escape')
+        mode = "0644"
+        inode = {"contents": contents, "mode": mode}
+        name = item['name']
+        path = '/etc/systemd/system/' + name
+        dir_list.add(os.path.dirname(path))
+        files_dict[path] = inode
+        enabled = item.get('enabled') or True
+        systemd_dict[name] = enabled
+
+
+# pylint: disable=too-few-public-methods
+class ActionModule(ActionBase):
+    """ActionModule for parse_ignition.py"""
+
+    def run(self, tmp=None, task_vars=None):
+        """Run parse_ignition action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "Parsed successfully"
+        files_dict = {}
+        systemd_dict = {}
+        dir_list = set()
+        result["files_dict"] = files_dict
+        result["systemd_dict"] = systemd_dict
+
+        # self.task_vars holds all in-scope variables.
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+        ign_file_contents = self._task.args.get('ign_file_contents')
+        get_files(files_dict, systemd_dict, dir_list, ign_file_contents)
+        result["dir_list"] = list(dir_list)
+        return result
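The plugin takes the already-parsed ignition JSON via its ign_file_contents argument and returns files_dict, systemd_dict and dir_list in the result; a hedged sketch of a task invoking it (the bootstrap_ign variable name is an assumption):

- name: Parse the bootstrap ignition payload
  parse_ignition:
    ign_file_contents: "{{ bootstrap_ign }}"
  register: ign_parsed

- debug:
    msg: "Directories referenced by the ignition payload: {{ ign_parsed.dir_list }}"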

File diff not shown because it is too large
+ 1072 - 0
roles/lib_utils/test/test_data/bootstrap.ign.json


File diff not shown because it is too large
+ 1 - 0
roles/lib_utils/test/test_data/bs.ign.json


File diff not shown because it is too large
+ 700 - 0
roles/lib_utils/test/test_data/bs2.ign.json


File diff not shown because it is too large
+ 88 - 0
roles/lib_utils/test/test_data/example.ign.json


+ 66 - 0
roles/lib_utils/test/test_parse_ignition.py

@@ -0,0 +1,66 @@
+'''
+ Unit tests for the parse_ignition action plugin
+'''
+import json
+import os
+import sys
+
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'action_plugins'))
+sys.path.insert(0, MODULE_PATH)
+ASSET_PATH = os.path.realpath(os.path.join(__file__, os.pardir, 'test_data'))
+
+# pylint: disable=import-error,wrong-import-position,missing-docstring
+import parse_ignition  # noqa: E402
+
+
+def read_ign(path):
+    with open(path) as ign_in:
+        data = json.loads(ign_in.read())
+    return data
+
+
+def write_out_files(files_dict):
+    for path in files_dict:
+        with open('/tmp/bsoutput/' + path.replace('/', '__'), 'w') as fpath:
+            fpath.write(files_dict[path]['contents'])
+
+
+def test_parse_json():
+    ign_data = read_ign(os.path.join(ASSET_PATH, 'example.ign.json'))
+    files_dict = {}
+    systemd_dict = {}
+    dir_list = set()
+    result = {}
+    result['files_dict'] = files_dict
+    result['systemd_dict'] = systemd_dict
+    parse_ignition.get_files(files_dict, systemd_dict, dir_list, ign_data)
+
+
+def test_parse_json_encoded_files():
+    ign_data = read_ign(os.path.join(ASSET_PATH, 'bootstrap.ign.json'))
+    files_dict = {}
+    systemd_dict = {}
+    dir_list = set()
+    result = {}
+    result['files_dict'] = files_dict
+    result['systemd_dict'] = systemd_dict
+    parse_ignition.get_files(files_dict, systemd_dict, dir_list, ign_data)
+    # print(files_dict['/opt/tectonic/manifests/cluster-config.yaml']['contents'])
+
+
+def parse_json2():
+    ign_data = read_ign(os.path.join(ASSET_PATH, 'bs.ign.json'))
+    files_dict = {}
+    systemd_dict = {}
+    dir_list = set()
+    result = {}
+    result['files_dict'] = files_dict
+    result['systemd_dict'] = systemd_dict
+    parse_ignition.get_files(files_dict, systemd_dict, dir_list, ign_data)
+    write_out_files(files_dict)
+
+
+if __name__ == '__main__':
+    test_parse_json()
+    test_parse_json_encoded_files()
+    parse_json2()

+ 3 - 3
roles/openshift_facts/defaults/main.yml

@@ -45,8 +45,8 @@ osm_image: "{{ l_osm_registry_url | regex_replace('${component}' | regex_escape,
 repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}"
 repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}"
 
-openshift_use_crio: False
-openshift_use_crio_only: False
+openshift_use_crio: True
+openshift_use_crio_only: True
 openshift_crio_enable_docker_gc: False
 openshift_crio_var_sock: "/var/run/crio/crio.sock"
 openshift_crio_pause_image: "{{ l_os_registry_url | regex_replace('${component}' | regex_escape, 'pod') }}"
@@ -136,7 +136,7 @@ openshift_service_type_dict:
   openshift-enterprise: atomic-openshift
 
 openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
-openshift_master_api_port: "8443"
+openshift_master_api_port: "6443"
 openshift_ca_host: "{{ groups.oo_first_master.0 }}"
 openshift_use_openshift_sdn: true
 os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"

+ 102 - 12
roles/openshift_gcp/defaults/main.yml

@@ -31,21 +31,21 @@ openshift_gcp_registry_bucket_name: "{{ openshift_gcp_prefix }}-docker-registry"
 openshift_gcp_master_dns_ttl: 300
 
 openshift_gcp_node_group_config:
-  - name: master
-    suffix: m
-    tags: ocp-master
+  - name: bootstrap
+    suffix: b
+    tags: ocp-bootstrap ocp-node
     machine_type: n1-standard-2
     boot_disk_size: 150
     scale: 1
-  - name: infra
-    suffix: i
-    tags: ocp-infra-node ocp-node
+  - name: master
+    suffix: m
+    tags: ocp-master ocp-node
     machine_type: n1-standard-2
     boot_disk_size: 150
     scale: 1
-  - name: node
+  - name: worker
     suffix: n
-    tags: ocp-node
+    tags: ocp-worker ocp-node
     machine_type: n1-standard-2
     boot_disk_size: 150
     scale: 3
@@ -61,7 +61,97 @@ openshift_gcp_user_data_file: ''
 
 openshift_gcp_multizone: False
 
-openshift_gcp_node_group_mapping:
-  masters: 'node-config-master'
-  infra: 'node-config-infra'
-  compute: 'node-config-compute'
+provision_custom_repositories: []
+
+mcd_port: 49500
+openshift_gcp_kubernetes_api_port: 6443
+openshift_gcp_master_healthcheck_port: 8080
+
+openshift_gcp_firewall_rules:
+  - rule: icmp
+    allowed:
+      - ip_protocol: 'icmp'
+  - rule: ssh-external
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '22'
+  - rule: ssh-internal
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '22'
+    source_tags:
+      - ssh-bastion
+  - rule: master-internal
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '2224'
+          - '2379'
+          - '2380'
+          - '4001'
+          #kube-system/kubelet:cadvisor
+          - '4193'
+          - "{{ openshift_gcp_kubernetes_api_port }}"
+          - "{{ internal_console_port }}"
+          - '8053'
+          - '8444'
+          - "{{ openshift_gcp_master_healthcheck_port }}"
+          #cadvisor port
+          - '9100'
+          # CVO port
+          - '9099'
+          - '10250'
+          - '10255'
+          - '24224'
+          - "{{ mcd_port }}"
+      - ip_protocol: 'udp'
+        ports:
+          - '4789'
+          - '5404'
+          - '5405'
+          - '10255'
+          - '24224'
+    source_tags:
+      - ocp
+    target_tags:
+      - ocp-master
+      - ocp-bootstrap
+  - rule: master-external
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - "{{ openshift_gcp_master_healthcheck_port }}"
+          - "{{ openshift_gcp_kubernetes_api_port }}"
+          - "{{ openshift_master_api_port }}"
+          - "{{ mcd_port }}"
+    target_tags:
+      - ocp-master
+      - ocp-bootstrap
+  - rule: node-internal
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - '1936'
+          - '10250'
+          - '10255'
+          - '9000-10000'
+      - ip_protocol: 'udp'
+        ports:
+          - '4789'
+          - '10255'
+    source_tags:
+      - ocp
+    target_tags:
+      - ocp-worker
+  - rule: node-external
+    allowed:
+      - ip_protocol: 'tcp'
+        ports:
+          - "{{ openshift_node_port_range }}"
+      - ip_protocol: 'udp'
+        ports:
+          - "{{ openshift_node_port_range }}"
+    target_tags:
+      - ocp-worker

+ 0 - 20
roles/openshift_gcp/tasks/add_custom_repositories.yml

@@ -1,20 +0,0 @@
----
-- name: Copy custom repository secrets
-  copy:
-    src: "{{ files_dir }}/{{ item.1.sslclientcert }}"
-    dest: /var/lib/yum/custom_secret_{{ item.0 }}_cert
-  when: item.1.sslclientcert | default(false)
-  with_indexed_items: "{{ provision_custom_repositories }}"
-- name: Copy custom repository secrets
-  copy:
-    src: "{{ files_dir }}/{{ item.1.sslclientkey }}"
-    dest: /var/lib/yum/custom_secret_{{ item.0 }}_key
-  when: item.1.sslclientkey | default(false)
-  with_indexed_items: "{{ provision_custom_repositories }}"
-
-- name: Create any custom repos that are defined
-  template:
-    src: yum_repo.j2
-    dest: /etc/yum.repos.d/provision_custom_repositories.repo
-  when: provision_custom_repositories | length > 0
-  notify: refresh cache

+ 0 - 10
roles/openshift_gcp/tasks/configure_gcp_base_image.yml

@@ -1,10 +0,0 @@
-# GCE instances are starting with xfs AND barrier=1, which is only for extfs.
----
-- name: Remove barrier=1 from XFS fstab entries
-  command: sed -i -e 's/xfs\(.*\)barrier=1/xfs\1/g; s/, / /g' /etc/fstab
-
-- name: Ensure the root filesystem has XFS group quota turned on
-  command: sed -i -e 's/linux16 \(.*\)$/linux16 \1 rootflags=gquota/g' /boot/grub2/grub.cfg
-
-- name: Ensure the root partition grows on startup
-  copy: src=partition.conf dest=/etc/systemd/system/google-instance-setup.service.d/

+ 0 - 40
roles/openshift_gcp/tasks/configure_master_bootstrap.yml

@@ -1,40 +0,0 @@
-#
-# These tasks configure the instance to periodically update the project metadata with the
-# latest bootstrap kubeconfig from the project metadata. This keeps the project metadata
-# in sync with the cluster's configuration. We then invoke a CSR approve on any nodes that
-# are waiting to join the cluster.
-#
----
-- name: Copy unit service
-  copy:
-    src: openshift-bootstrap-update.timer
-    dest: /etc/systemd/system/openshift-bootstrap-update.timer
-    owner: root
-    group: root
-    mode: 0664
-
-- name: Copy unit timer
-  copy:
-    src: openshift-bootstrap-update.service
-    dest: /etc/systemd/system/openshift-bootstrap-update.service
-    owner: root
-    group: root
-    mode: 0664
-
-- name: Create bootstrap update script
-  template: src=openshift-bootstrap-update.j2 dest=/usr/bin/openshift-bootstrap-update mode=u+rx
-
-- name: Start bootstrap update timer
-  systemd:
-    name: "openshift-bootstrap-update.timer"
-    state: started
-
-- name: Approve node certificates when bootstrapping
-  oc_csr_approve:
-    oc_bin: "{{ hostvars[groups.masters.0]['first_master_client_binary'] }}"
-    oc_conf: "{{ hostvars[groups.masters.0].openshift.common.config_base }}/master/admin.kubeconfig"
-    node_list: "{{ groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list }}"
-  register: gcp_csr_approve
-  retries: 30
-  until: gcp_csr_approve is succeeded
-  when: groups['all'] | map('extract', hostvars) | selectattr('gce_metadata.bootstrap', 'match', 'true') | map(attribute='gce_name') | list | length > 0

+ 6 - 0
roles/openshift_gcp/tasks/configure_master_healthcheck.yml

@@ -24,3 +24,9 @@
     name: haproxy
     state: started
     enabled: yes
+
+- name: allow haproxy to connect to any port
+  seboolean:
+    name: haproxy_connect_any
+    state: yes
+    persistent: yes

+ 141 - 0
roles/openshift_gcp/tasks/deprovision.yml

@@ -0,0 +1,141 @@
+---
+- name: Fetch instance group managers
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - "name : {{ openshift_gcp_prefix }}ig*"
+  register: instance_group_managers
+
+- name: Fetch instance templates
+  gcp_compute_instance_template_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    filters:
+    - "name : {{ openshift_gcp_prefix }}instance-template*"
+  register: instance_templates
+
+- name: Collect a list of instances
+  gcp_compute_instance_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+  register: all_instances
+
+- name: Filter instances to fetch masters
+  set_fact:
+    master_instances: "{{ master_instances | default([]) }} + [ {{ item }} ]"
+  with_items:
+  - "{{ all_instances['items'] }}"
+  when:
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-master' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
+
+- name: Get managed zone
+  gcp_dns_managed_zone:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+    state: present
+  register: managed_zone
+
+- name: Remove public API hostname
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_master_cluster_public_hostname }}."
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    state: absent
+
+- name: Remove etcd records for masters
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ entry_name }}"
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    state: absent
+  with_indexed_items: "{{ master_instances }}"
+  when: master_instances is defined
+  vars:
+    entry_name: "{{ openshift_gcp_prefix }}etcd-{{ item.0 }}.{{ public_hosted_zone }}."
+
+- name: Remove GCP Instance Groups
+  gcp_compute_instance_group_manager:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    name: "{{ item[0].name }}"
+    base_instance_name: "{{ item[0].name }}"
+    instance_template: "{{ item[1] }}"
+    state: absent
+  with_nested:
+  - "{{ instance_group_managers['items'] }}"
+  - "{{ instance_templates['items'] }}"
+
+- name: Remove GCP instance templates
+  gcp_compute_instance_template:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ item.name }}"
+    state: absent
+  with_items: "{{ instance_templates['items'] }}"
+
+- name: Remove GCP firewall
+  gcp_compute_firewall:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_prefix }}{{ item.rule }}"
+    state: absent
+  with_items: "{{ openshift_gcp_firewall_rules }}"
+
+- name: Remove GCP network
+  gcp_compute_network:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_network_name }}"
+    state: absent
+
+- name: Templatize DNS script
+  template: src=remove.j2.sh dest=/tmp/remove.sh mode=u+rx
+
+- name: Run DNS cleanup script
+  command: /tmp/remove.sh
+  args:
+    chdir: "{{ files_dir }}"
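
Every task in deprovision.yml repeats the same service-account auth parameters. As a sketch only (not part of this change, and assuming the play targets localhost on Ansible 2.7+), the repetition could be hoisted into play-level module_defaults; module and variable names below follow the tasks above:

```
# Hypothetical play-level defaults -- not in the commit.
- hosts: localhost
  module_defaults:
    gcp_compute_instance_facts:
      auth_kind: serviceaccount
      scopes:
      - https://www.googleapis.com/auth/compute
      service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
      project: "{{ openshift_gcp_project }}"
  tasks:
  - name: Collect a list of instances
    gcp_compute_instance_facts:
      zone: "{{ openshift_gcp_zone }}"
    register: all_instances
```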

+ 9 - 2
roles/openshift_gcp/tasks/dynamic_inventory.yml

@@ -1,5 +1,12 @@
 ---
 - name: Extract PEM from service account file
-  copy: content="{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}" dest=/tmp/gce.pem mode=0600
+  copy:
+    content: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).private_key }}"
+    dest: /tmp/gce.pem
+    mode: 0600
+
 - name: Templatize environment script
-  template: src=inventory.j2.sh dest=/tmp/inventory.sh mode=u+rx
+  template:
+    src: inventory.j2.sh
+    dest: /tmp/inventory.sh
+    mode: u+rx

+ 246 - 36
roles/openshift_gcp/tasks/main.yml

@@ -1,45 +1,255 @@
-#
-# This role relies on gcloud invoked via templated bash in order to
-# provide a high performance deployment option. The next logical step
-# is to transition to a deployment manager template which is then instantiated.
-# TODO: use a formal set of role parameters consistent with openshift_aws
-#
 ---
-- name: Templatize DNS script
-  template: src=dns.j2.sh dest=/tmp/openshift_gcp_provision_dns.sh mode=u+rx
-- name: Templatize provision script
-  template: src=provision.j2.sh dest=/tmp/openshift_gcp_provision.sh mode=u+rx
-- name: Templatize de-provision script
-  template: src=remove.j2.sh dest=/tmp/openshift_gcp_provision_remove.sh mode=u+rx
-  when:
-  - state | default('present') == 'absent'
+- name: Create GCP network
+  gcp_compute_network:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_network_name }}"
+    state: present
+  register: network
 
-- name: Provision GCP DNS domain
-  command: /tmp/openshift_gcp_provision_dns.sh
-  args:
-    chdir: "{{ files_dir }}"
-  register: dns_provision
+- name: Create GCP firewall
+  gcp_compute_firewall:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_prefix }}{{ item.rule }}"
+    allowed: "{{ item.allowed }}"
+    network: "{{ network.selfLink }}"
+    target_tags: "{{ item.target_tags | default(omit) }}"
+    source_tags: "{{ item.source_tags | default(omit) }}"
+    state: present
+  with_items: "{{ openshift_gcp_firewall_rules }}"
+
+- import_tasks: provision_ssh_keys.yml
+
+- name: Find GCP image
+  gcp_compute_image_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    filters:
+    - "family = {{ openshift_gcp_image }}"
+  register: gcp_node_image
+
+- fail:
+    msg: "No images for family '{{ openshift_gcp_image }}' found"
+  when: gcp_node_image['items'] | length == 0
+
+- name: Provision GCP instance templates
+  gcp_compute_instance_template:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_gcp_prefix }}instance-template-{{ item.name }}"
+    properties:
+      machine_type: "{{ item.machine_type }}"
+      network_interfaces:
+      - network: "{{ network }}"
+        access_configs:
+        - name: "{{ openshift_gcp_prefix }}instance-template-{{ item.name }}-config"
+          type: 'ONE_TO_ONE_NAT'
+      disks:
+      - auto_delete: true
+        boot: true
+        initialize_params:
+          disk_size_gb: "{{ item.boot_disk_size }}"
+          source_image: "{{ gcp_node_image['items'][0].selfLink }}"
+      metadata:
+        "cluster-id": "{{ openshift_gcp_prefix + openshift_gcp_clusterid }}"
+        "node-group": "{{ item.name }}"
+      tags:
+        items:
+        - "ocp"
+        - "{{ openshift_gcp_prefix }}ocp"
+        - "{{ item.tags }}"
+    state: present
+  with_items: "{{ openshift_gcp_node_group_config }}"
+  register: instance_template
+
+- name: Create GCP Instance Groups
+  gcp_compute_instance_group_manager:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    name: "{{ openshift_gcp_prefix }}ig-{{ item.item.suffix }}"
+    base_instance_name: "{{ openshift_gcp_prefix }}ig-{{ item.item.suffix }}"
+    instance_template: "{{ item }}"
+    target_size: "{{ item.item.scale | int}}"
+    named_ports:
+    - name: "{{ openshift_gcp_prefix }}port-kube-api"
+      port: "{{ openshift_gcp_kubernetes_api_port }}"
+    - name: "{{ openshift_gcp_prefix }}port-openshift-api"
+      port: "{{ openshift_master_api_port }}"
+    state: present
+  with_items: "{{ instance_template.results }}"
+  register: instance_groups
+
+- name: Get bootstrap instance group
+  gcp_compute_instance_group_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - name = "{{ openshift_gcp_prefix }}ig-b"
+  register: bootstrap_instance_group
+
+- name: Get master instance group
+  gcp_compute_instance_group_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - name = "{{ openshift_gcp_prefix }}ig-m"
+  register: master_instance_group
+
+- set_fact:
+    bootstrap_instance_group: "{{ bootstrap_instance_group['items'][0] }}"
+    master_instance_group: "{{ master_instance_group['items'][0] }}"
+
+- name: Wait for bootstrap instance group to start all instances
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters: "name = {{ bootstrap_instance_group['name'] }}"
+  register: bootstrap_group_result
+  # Wait for 3 minutes
+  retries: 36
+  delay: 5
+  until:
+  - "bootstrap_group_result['items'][0]['currentActions']['none'] == bootstrap_group_result['items'][0]['targetSize']"
+
+- name: Wait for master instance group to start all instances
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters: "name = {{ master_instance_group['name'] }}"
+  register: master_group_result
+  # Wait for 3 minutes
+  retries: 36
+  delay: 5
+  until:
+  - "master_group_result['items'][0]['currentActions']['none'] == master_group_result['items'][0]['targetSize']"
+
+- name: Collect a list of instances
+  gcp_compute_instance_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+  register: all_instances
+
+- name: Filter instances to fetch bootstrap
+  set_fact:
+    bootstrap_instances: "{{ item }}"
+  with_items:
+  - "{{ all_instances['items'] }}"
   when:
-  - state | default('present') == 'present'
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-bootstrap' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
 
-- name: Ensure that DNS resolves to the hosted zone
-  assert:
-    that:
-    - "lookup('dig', public_hosted_zone, 'qtype=NS', wantlist=True) | sort | join(',') == dns_provision.stdout"
-    msg: "The DNS domain {{ public_hosted_zone }} defined in 'public_hosted_zone' must have NS records pointing to the Google nameservers: '{{ dns_provision.stdout }}' instead of '{{ lookup('dig', public_hosted_zone, 'qtype=NS') }}'."
+- name: Filter instances to fetch masters
+  set_fact:
+    master_instances: "{{ master_instances | default([]) }} + [ {{ item }} ]"
+  with_items:
+  - "{{ all_instances['items'] }}"
   when:
-  - state | default('present') == 'present'
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-master' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
 
-- import_tasks: provision_ssh_keys.yml
+- set_fact:
+    etcd_discovery_targets: "{{ etcd_discovery_targets | default([]) }} + ['0 0 2380 {{ entry_name }}']"
+    master_external_ips: "{{ master_external_ips | default([]) }} + ['{{ master_ip }}']"
+  with_indexed_items: "{{ master_instances }}"
+  vars:
+    entry_name: "{{ openshift_gcp_prefix }}etcd-{{ item.0 }}.{{ public_hosted_zone }}."
+    master_ip: "{{ item.1.networkInterfaces[0].accessConfigs[0].natIP }}"
+
+- set_fact:
+    bootstrap_and_masters: "{{ master_external_ips | list }} + ['{{ bootstrap_instances.networkInterfaces[0].accessConfigs[0].natIP }}']"
+
+- name: Get managed zone
+  gcp_dns_managed_zone:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+    state: present
+  register: managed_zone
+
+- name: Create public API hostname
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_master_cluster_public_hostname }}."
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    ttl: 600
+    target: "{{ bootstrap_and_masters }}"
+    state: present
 
-- name: Provision GCP resources
-  command: /tmp/openshift_gcp_provision.sh
+- name: Create etcd records for masters
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ entry_name }}"
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    ttl: 600
+    target: "{{ master_ip }}"
+    state: present
+  with_indexed_items: "{{ master_instances }}"
+  vars:
+    entry_name: "{{ openshift_gcp_prefix }}etcd-{{ item.0 }}.{{ public_hosted_zone }}."
+    master_ip: "{{ item.1.networkInterfaces[0].networkIP }}"
+
+- name: Templatize DNS script
+  template: src=additional_settings.j2.sh dest=/tmp/additional_settings.sh mode=u+rx
+
+- name: Run additional GCP provisioning script
+  command: /tmp/additional_settings.sh
   args:
     chdir: "{{ files_dir }}"
-  when:
-  - state | default('present') == 'present'
-
-- name: De-provision GCP resources
-  command: /tmp/openshift_gcp_provision_remove.sh
-  when:
-  - state | default('present') == 'absent'
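
The provisioning tasks in this file iterate over openshift_gcp_node_group_config and read item.name, item.suffix, item.machine_type, item.boot_disk_size, item.scale and item.tags; the actual list lives in roles/openshift_gcp/defaults/main.yml. A sketch of one entry, with hypothetical values:

```
# Field names taken from the tasks above; values are illustrative only.
openshift_gcp_node_group_config:
- name: master
  suffix: m
  machine_type: n1-standard-4
  boot_disk_size: 100
  scale: 3
  tags: ocp-master
```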

+ 4 - 1
roles/openshift_gcp/tasks/provision_ssh_keys.yml

@@ -1,6 +1,9 @@
 ---
 - name: Templatize SSH key provision script
-  template: src=provision_ssh.j2.sh dest=/tmp/openshift_gcp_provision_ssh.sh mode=u+rx
+  template:
+    src: provision_ssh.j2.sh
+    dest: /tmp/openshift_gcp_provision_ssh.sh
+    mode: u+rx
 
 - name: Provision GCP SSH key resources
   command: /tmp/openshift_gcp_provision_ssh.sh

+ 0 - 32
roles/openshift_gcp/tasks/publish_image.yml

@@ -1,32 +0,0 @@
----
-- name: Require openshift_gcp_image
-  fail:
-    msg: "A source image name or family is required for image publishing.  Please ensure `openshift_gcp_image` is defined."
-  when: openshift_gcp_image is undefined
-
-- name: Require openshift_gcp_target_image
-  fail:
-    msg: "A target image name or family is required for image publishing.  Please ensure `openshift_gcp_target_image` is defined."
-  when: openshift_gcp_target_image is undefined
-
-- block:
-  - name: Retrieve images in the {{ openshift_gcp_target_image }} family
-    command: >
-      gcloud --project "{{ openshift_gcp_project }}" compute images list
-        "--filter=family={{ openshift_gcp_target_image }}"
-        --format=json --sort-by ~creationTimestamp
-    register: images
-  - name: Prune oldest images
-    command: >
-      gcloud --project "{{ openshift_gcp_project }}" compute images delete "{{ item['name'] }}"
-    with_items: "{{ (images.stdout | default('[]') | from_json )[( openshift_gcp_keep_images | int ):] }}"
-  when: openshift_gcp_keep_images is defined
-
-- name: Copy the latest image in the family {{ openshift_gcp_image }} to {{ openshift_gcp_target_image }}
-  command: >
-    gcloud --project "{{ openshift_gcp_target_project | default(openshift_gcp_project) }}"
-      beta compute images create
-      "{{ openshift_gcp_target_image_name | default(openshift_gcp_target_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}"
-      --family "{{ openshift_gcp_target_image }}"
-      --source-image-family "{{ openshift_gcp_image }}"
-      --source-image-project "{{ openshift_gcp_project }}"

+ 93 - 0
roles/openshift_gcp/tasks/remove_bootstrap.yml

@@ -0,0 +1,93 @@
+---
+- name: Get bootstrap instance group
+  gcp_compute_instance_group_manager_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    filters:
+    - name = "{{ openshift_gcp_prefix }}ig-b"
+  register: bootstrap_instance_group
+
+- name: Get bootstrap instance template
+  gcp_compute_instance_template_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    filters:
+    - "name : {{ openshift_gcp_prefix }}instance-template-bootstrap"
+  register: bootstrap_instance_template
+
+- name: Collect a list of instances
+  gcp_compute_instance_facts:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+  register: all_instances
+
+- name: Filter instances to fetch masters
+  set_fact:
+    master_instances: "{{ master_instances | default([]) }} + [ {{ item }} ]"
+  with_items:
+  - "{{ all_instances['items'] }}"
+  when:
+  - "'tags' in item"
+  - "'items' in item['tags']"
+  - "cluster_tag in item['tags']['items']"
+  - "'ocp-master' in item['tags']['items']"
+  vars:
+    cluster_tag: "{{ openshift_gcp_prefix }}ocp"
+
+- set_fact:
+    master_external_ips: "{{ master_external_ips | default([]) }}  + [ '{{ master_ip }}' ]"
+  with_indexed_items: "{{ master_instances }}"
+  vars:
+    master_ip: "{{ item.1.networkInterfaces[0].accessConfigs[0].natIP }}"
+
+- name: Get a managed zone
+  gcp_dns_managed_zone:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+    state: present
+  register: managed_zone
+
+- name: Update public API hostname
+  gcp_dns_resource_record_set:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    name: "{{ openshift_master_cluster_public_hostname }}."
+    managed_zone: "{{ managed_zone }}"
+    type: A
+    ttl: 600
+    target: "{{ master_external_ips }}"
+    state: present
+
+- name: Delete bootstrap instance group
+  gcp_compute_instance_group_manager:
+    auth_kind: serviceaccount
+    scopes:
+    - https://www.googleapis.com/auth/compute
+    service_account_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+    project: "{{ openshift_gcp_project }}"
+    zone: "{{ openshift_gcp_zone }}"
+    name: "{{ bootstrap_instance_group['items'][0]['name'] }}"
+    base_instance_name: "{{ bootstrap_instance_group['items'][0]['baseInstanceName'] }}"
+    instance_template: "{{ bootstrap_instance_template['items'][0] }}"
+    state: absent
+  when:
+  - bootstrap_instance_group['items'] | length > 0
+  - bootstrap_instance_template['items'] | length > 0

+ 15 - 28
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -1,38 +1,25 @@
 ---
-- name: Set var to exclude bootstrapped nodes
-  set_fact:
-    bootstrapped_nodes: "{{ all_nodes | default(false) | ternary([], groups['tag_ocp-bootstrap']) | default([]) }}"
-
-- name: Add node instances to node group
+- name: Add bootstrap instances
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: nodes, new_nodes
-    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['compute'] }}"
-  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(bootstrapped_nodes) }}"
-
-- name: Add bootstrap node instances as nodes
-  add_host:
-    name: "{{ item }}"
-    groups: nodes, new_nodes
+    groups:
+    - bootstrap
+    - nodes
+    ignition_file: "{{ openshift_bootstrap_ignition_file }}"
   with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
-  when: all_nodes | default(False)
-
-- name: Add non-bootstrapping master node instances to node group
-  add_host:
-    name: "{{ hostvars[item].gce_name }}"
-    groups: nodes
-  with_items: "{{ groups['tag_ocp-master'] | default([]) | difference(bootstrapped_nodes) }}"
 
-- name: Add infra node instances to node group
+- name: Add master instances
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: nodes, new_nodes
-    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['infra'] }}"
-  with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(bootstrapped_nodes) }}"
+    groups:
+    - masters
+    - nodes
+  with_items: "{{ groups['tag_ocp-master'] | default([]) }}"
 
-- name: Add masters to requisite groups
+- name: Add worker instances
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: masters, etcd
-    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['masters'] }}"
-  with_items: "{{ groups['tag_ocp-master'] }}"
+    groups:
+    - workers
+    - nodes
+  with_items: "{{ groups['tag_ocp-worker'] | default([]) }}"
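
These add_host calls only populate the in-memory inventory for the remainder of the run (the bootstrap host additionally gets ignition_file set). With a hypothetical cluster prefix, the resulting groups would look roughly like:

```
# Illustrative only -- host names are hypothetical.
bootstrap: [ci-ig-b-abcd]
masters: [ci-ig-m-0123, ci-ig-m-4567, ci-ig-m-89ab]
workers: [ci-ig-w-cdef]
nodes: [ci-ig-b-abcd, ci-ig-m-0123, ci-ig-m-4567, ci-ig-m-89ab, ci-ig-w-cdef]
```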

+ 39 - 0
roles/openshift_gcp/templates/additional_settings.j2.sh

@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
+# configure DNS
+(
+# Retry DNS changes until they succeed since this may be a shared resource
+while true; do
+    dns="${TMPDIR:-/tmp}/dns.yaml"
+    rm -f $dns
+
+    # DNS records for etcd discovery
+    ETCD_DNS_NAME="_etcd-server-ssl._tcp.{{ lookup('env', 'INSTANCE_PREFIX') | mandatory }}.{{ public_hosted_zone }}."
+    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "${ETCD_DNS_NAME}" 2>/dev/null | grep -q "${ETCD_DNS_NAME}"; then
+        if [[ ! -f $dns ]]; then
+            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
+        fi
+        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "${ETCD_DNS_NAME}" --type SRV {% for etcd in etcd_discovery_targets %}'{{ etcd }}' {% endfor %}
+
+    else
+        echo "DNS record for '${ETCD_DNS_NAME}' already exists"
+    fi
+
+    # Commit all DNS changes, retrying if preconditions are not met
+    if [[ -f $dns ]]; then
+        if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
+            rc=$?
+            if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
+                continue
+            fi
+            exit $rc
+        fi
+    fi
+    break
+done
+) &
+
+for i in `jobs -p`; do wait $i; done
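
Each element of etcd_discovery_targets (built in tasks/main.yml above) becomes one rrdata value of the SRV record, in the form "priority weight port target". With a hypothetical prefix `ci-` and zone `example.com`, the data handed to the transaction would be roughly:

```
# Hypothetical values -- prefix and zone are illustrative.
etcd_discovery_targets:
- "0 0 2380 ci-etcd-0.example.com."
- "0 0 2380 ci-etcd-1.example.com."
- "0 0 2380 ci-etcd-2.example.com."
```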

+ 0 - 13
roles/openshift_gcp/templates/dns.j2.sh

@@ -1,13 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
-
-# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist
-if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" dns managed-zones create "${dns_zone}" --dns-name "{{ public_hosted_zone }}" --description "{{ public_hosted_zone }} domain" >/dev/null
-fi
-
-# Always output the expected nameservers as a comma delimited list
-gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" --format='value(nameServers)' | tr ';' ','

+ 2 - 2
roles/openshift_gcp/templates/master_healthcheck.j2

@@ -60,9 +60,9 @@ defaults
 #---------------------------------------------------------------------
 # main frontend which proxys to the backends
 #---------------------------------------------------------------------
-frontend  http-proxy *:8080
+frontend  http-proxy *:{{ openshift_gcp_master_healthcheck_port }}
     acl          url_healthz  path_beg  -i /healthz
     use_backend  ocp          if url_healthz
 
 backend ocp
-    server       ocp localhost:{{ internal_console_port }} ssl verify none
+    server       ocp localhost:{{ openshift_gcp_kubernetes_api_port }} ssl verify none

+ 0 - 7
roles/openshift_gcp/templates/openshift-bootstrap-update.j2

@@ -1,7 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
-gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
-rm -f /root/bootstrap.kubeconfig

+ 0 - 304
roles/openshift_gcp/templates/provision.j2.sh

@@ -1,304 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-metadata=""
-if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
-    if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
-        echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
-        exit 1
-    fi
-    metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
-fi
-if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
-    if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
-        echo "User data file missing at {{ openshift_gcp_user_data_file }}"
-        exit 1
-    fi
-    if [[ -n "${metadata}" ]]; then
-        metadata+=","
-    else
-        metadata="--metadata-from-file="
-    fi
-    metadata+="user-data={{ openshift_gcp_user_data_file }}"
-fi
-
-# Select image or image family
-image="{{ openshift_gcp_image }}"
-if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
-        echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue'"
-        exit 1
-    fi
-    image="family/${image}"
-fi
-
-### PROVISION THE INFRASTRUCTURE ###
-
-dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
-
-# Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
-if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
-    echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
-    exit 1
-fi
-
-# Create network
-if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
-else
-    echo "Network '{{ openshift_gcp_network_name }}' already exists"
-fi
-
-# Firewall rules in a form:
-# ['name']='parameters for "gcloud compute firewall-rules create"'
-# For all possible parameters see: gcloud compute firewall-rules create --help
-range=""
-if [[ -n "{{ openshift_node_port_range }}" ]]; then
-    range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
-fi
-declare -A FW_RULES=(
-  ['icmp']='--allow icmp'
-  ['ssh-external']='--allow tcp:22'
-  ['ssh-internal']='--allow tcp:22 --source-tags bastion'
-  ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
-  ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
-  ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255,tcp:9000-10000 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
-  ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
-  ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
-)
-for rule in "${!FW_RULES[@]}"; do
-    ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
-    else
-        echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
-    fi ) &
-done
-
-
-# Master IP
-( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
-else
-    echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
-fi ) &
-
-# Internal master IP
-( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
-else
-    echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
-fi ) &
-
-# Router IP
-( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
-else
-    echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
-fi ) &
-
-
-{% for node_group in openshift_gcp_node_group_config %}
-# configure {{ node_group.name }}
-(
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
-                --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
-                --tags "{{ openshift_gcp_prefix }}ocp,ocp,ocp-bootstrap,{{ node_group.tags }}" \
-                --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
-                --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
-                --image "{{ node_group.image | default('${image}') }}" ${metadata}  \
-                --metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}"
-    else
-        echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
-    fi
-
-    # Create instance group
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
-                --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
-    else
-        echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
-    fi
-) &
-{% endfor %}
-
-for i in `jobs -p`; do wait $i; done
-
-
-# Configure the master external LB rules
-(
-# Master health check
-if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
-else
-    echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
-fi
-
-gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
-        --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"
-
-# Master backend service
-if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
-    gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
-else
-    echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
-fi
-
-# Master tcp proxy target
-if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
-else
-    echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
-fi
-
-# Master forwarding rule
-if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
-    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
-    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
-else
-    echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
-fi
-) &
-
-
-# Configure the master internal LB rules
-(
-# Internal master health check
-if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
-else
-    echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
-fi
-
-# Internal master target pool
-if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
-else
-    echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
-fi
-
-# Internal master forwarding rule
-if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
-else
-    echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
-fi
-) &
-
-
-# Configure the infra node rules
-(
-# Router health check
-if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
-else
-    echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
-fi
-
-# Router target pool
-if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
-else
-    echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
-fi
-
-# Router forwarding rule
-if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
-    IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-    gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
-else
-    echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
-fi
-) &
-
-for i in `jobs -p`; do wait $i; done
-
-# set the target pools
-(
-if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
-    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
-else
-    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
-    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
-fi
-) &
-
-# configure DNS
-(
-# Retry DNS changes until they succeed since this may be a shared resource
-while true; do
-    dns="${TMPDIR:-/tmp}/dns.yaml"
-    rm -f $dns
-
-    # DNS record for master lb
-    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
-        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
-        if [[ ! -f $dns ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-        fi
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
-    else
-        echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
-    fi
-
-    # DNS record for internal master lb
-    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
-        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-        if [[ ! -f $dns ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-        fi
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
-    else
-        echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
-    fi
-
-    # DNS record for router lb
-    if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
-        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
-        if [[ ! -f $dns ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
-        fi
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "{{ wildcard_zone }}." --type A "$IP"
-        gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl {{ openshift_gcp_master_dns_ttl }} --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
-    else
-        echo "DNS record for '{{ wildcard_zone }}' already exists"
-    fi
-
-    # Commit all DNS changes, retrying if preconditions are not met
-    if [[ -f $dns ]]; then
-        if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
-            rc=$?
-            if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
-                continue
-            fi
-            exit $rc
-        fi
-    fi
-    break
-done
-) &
-
-# Create bucket for registry
-(
-if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
-    gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
-else
-    echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
-fi
-) &
-
-# wait until all node groups are stable
-{% for node_group in openshift_gcp_node_group_config %}
-{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %}
-# wait for stable {{ node_group.name }}
-( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
-{% else %}
-# not waiting for {{ node_group.name }} due to bootstrapping
-{% endif %}
-{% endfor %}
-
-
-for i in `jobs -p`; do wait $i; done

+ 11 - 144
roles/openshift_gcp/templates/remove.j2.sh

@@ -1,78 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
-
-function teardown_cmd() {
-    a=( $@ )
-    local name=$1
-    a=( "${a[@]:1}" )
-    local flag=0
-    local found=
-    for i in ${a[@]}; do
-        if [[ "$i" == "--"* ]]; then
-            found=true
-            break
-        fi
-        flag=$((flag+1))
-    done
-    if [[ -z "${found}" ]]; then
-      flag=$((flag+1))
-    fi
-    if gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} describe "${name}" ${a[@]:$flag} &>/dev/null; then
-        gcloud --project "{{ openshift_gcp_project }}" ${a[@]::$flag} delete -q "${name}" ${a[@]:$flag}
-    fi
-}
-
-function teardown() {
-    for i in `seq 1 20`; do
-        if teardown_cmd $@; then
-            break
-        fi
-        sleep 0.5
-    done
-}
-
-# Preemptively spin down the instances
-{% for node_group in openshift_gcp_node_group_config %}
-# scale down {{ node_group.name }}
-(
-    # performs a delete and scale down as one operation to ensure maximum parallelism
-    if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' 2>/dev/null ); then
-        exit 0
-    fi
-    instances="${instances%?}"
-    if [[ -z "${instances}" ]]; then
-        echo "warning: No instances in {{ node_group.name }}" 1>&2
-        exit 0
-    fi
-    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed delete-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --instances "${instances}"; then
-        echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
-        exit 0
-    fi
-) &
-{% endfor %}
-
-# Bucket for registry
-(
-if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
-    gsutil -m rm -r "gs://{{ openshift_gcp_registry_bucket_name }}"
-fi
-) &
-
-# Project metadata prefixed with {{ openshift_gcp_prefix }}
-(
-    for key in $( gcloud --project "{{ openshift_gcp_project }}" compute project-info describe --flatten=commonInstanceMetadata.items[] '--format=value(commonInstanceMetadata.items.key)' ); do
-        if [[ "${key}" == "{{ openshift_gcp_prefix }}"* ]]; then
-            gcloud --project "{{ openshift_gcp_project }}" compute project-info remove-metadata "--keys=${key}"
-        fi
-    done
-) &
-
-# Instances and disks used for image building
-(
-    teardown "{{ openshift_gcp_prefix }}build-image-instance" compute instances --zone "{{ openshift_gcp_zone }}"
-    teardown "{{ openshift_gcp_prefix }}build-image-instance" compute disks --zone "{{ openshift_gcp_zone }}"
-) &
+set -euxo pipefail
 
 # DNS
 (
@@ -86,9 +14,16 @@ if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${
         # export all dns records that match into a zone format, and turn each line into a set of args for
         # record-sets transaction.
         gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}"
-        if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
-                awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
-        then
+
+        # Write the header
+        ETCD_DNS_NAME="_etcd-server-ssl._tcp.{{ lookup('env', 'INSTANCE_PREFIX') | mandatory }}.{{ public_hosted_zone }}."
+        grep -F -e "${ETCD_DNS_NAME}" "${dns}" | awk '{ print "--name", $1, "--ttl", $2, "--type", $4 }' | head -n1 | xargs echo -n > "${dns}.input"
+        # Append all etcd records
+        grep -F -e "${ETCD_DNS_NAME}" "${dns}" | awk '{ print " \x27"$5" "$6" "$7" "$8"\x27"; }' | tr -d '\n\r' >> "${dns}.input" || true
+        echo >> "${dns}.input"
+
+
+        if [ -s "${dns}.input" ]; then
             rm -f "${dns}"
             gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
             cat "${dns}.input" | xargs -L1 gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
@@ -108,72 +43,4 @@ if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${
 fi
 ) &
 
-(
-# Router network rules
-teardown "{{ openshift_gcp_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}router-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}router-network-lb-health-check" compute http-health-checks
-teardown "{{ openshift_gcp_prefix }}router-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
-
-# Internal master network rules
-teardown "{{ openshift_gcp_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}master-network-lb-pool" compute target-pools --region "{{ openshift_gcp_region }}"
-teardown "{{ openshift_gcp_prefix }}master-network-lb-health-check" compute http-health-checks
-teardown "{{ openshift_gcp_prefix }}master-network-lb-ip" compute addresses --region "{{ openshift_gcp_region }}"
-) &
-
-(
-# Master SSL network rules
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-target" compute target-tcp-proxies
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-ip" compute addresses --global
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-backend" compute backend-services --global
-teardown "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" compute health-checks
-) &
-
-# Firewall rules
-(
-    if ! firewalls=$( gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules list --filter="network~projects/{{ openshift_gcp_project }}/global/networks/{{ openshift_gcp_network_name }}" --format="value[terminator=' '](name)" 2>/dev/null ); then
-        exit 0
-    fi
-    firewalls="${firewalls%?}"
-    if [[ -z "${firewalls}" ]]; then
-        exit 0
-    fi
-    gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules delete -q ${firewalls}
-) &
-
 for i in `jobs -p`; do wait $i; done
-
-{% for node_group in openshift_gcp_node_group_config %}
-# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed
-(
-    teardown "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ openshift_gcp_zone }}"
-    teardown "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
-) &
-{% endfor %}
-
-for i in `jobs -p`; do wait $i; done
-
-# Images specifically located under this cluster prefix family
-for name in $( gcloud --project "{{ openshift_gcp_project }}" compute images list "--filter=family={{ openshift_gcp_prefix }}images" '--format=value(name)' ); do
-    ( gcloud --project "{{ openshift_gcp_project }}" compute images delete "${name}" ) &
-done
-
-# Disks
-(
-    if ! disks=$( gcloud --project "{{ openshift_gcp_project }}" compute disks list --filter="users~projects/{{ openshift_gcp_project }}/zones/{{ openshift_gcp_zone }}/instances/{{ openshift_gcp_prefix }}.*" --format="value[terminator=' '](name)" 2>/dev/null ); then
-        exit 0
-    fi
-    disks="${disks%?}"
-    if [[ -z "${disks}" ]]; then
-        echo "warning: No disks in use by {{ openshift_gcp_prefix }}" 1>&2
-        exit 0
-    fi
-    gcloud --project "{{ openshift_gcp_project }}" compute disks delete -q "${disks}"
-) &
-
-# Network
-( teardown "{{ openshift_gcp_network_name }}" compute networks ) &
-
-for i in `jobs -p`; do wait $i; done

+ 0 - 20
roles/openshift_gcp/templates/yum_repo.j2

@@ -1,20 +0,0 @@
-{% for repo in provision_custom_repositories %}
-[{{ repo.id | default(repo.name) }}]
-name={{ repo.name | default(repo.id) }}
-baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default(1) %}
-enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default(1) %}
-gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
-{% if 'sslclientcert' in repo %}
-sslclientcert={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_cert" if repo.sslclientcert }}
-{% endif %}
-{% if 'sslclientkey' in repo %}
-sslclientkey={{ "/var/lib/yum/custom_secret_" + (loop.index-1)|string + "_key" if repo.sslclientkey }}
-{% endif %}
-{% for key, value in repo.iteritems() %}
-{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck', 'sslclientkey', 'sslclientcert'] and value is defined %}
-{{ key }}={{ value }}
-{% endif %}
-{% endfor %}
-{% endfor %}

+ 61 - 0
roles/openshift_node40/README.md

@@ -0,0 +1,61 @@
+OpenShift Node
+================================
+
+Node service installation
+
+Requirements
+------------
+
+* Ansible 2.2
+* One or more Master servers
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos
+
+Role Variables
+--------------
+From this role:
+
+| Name                                     | Default value         | Description                                              |
+|------------------------------------------|-----------------------|----------------------------------------------------------|
+| openshift_node_start_options             | UNDEF (Optional)      | Options to pass to node start cmdline                    |
+| oreg_url                                 | UNDEF (Optional)      | Default docker registry to use                           |
+| openshift_persistentlocalstorage_enabled | false                 | Enable the persistent local storage                      |
+
+openshift_node_start_options can be used to pass any node start option, e.g.:
+
+--enable=kubelet,plugins
+
+which would run a node without kube-proxy and DNS.
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+Notes
+-----
+
+Currently we support re-labeling nodes, but we do not re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, follow the steps below:
+
+```
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
+````
+
+> If you are using a version earlier than 1.5/3.5, you must replace `--drain` with `--evacuate`.
+
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
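
The "Example Playbook" section of this new README is left empty. As a minimal, hypothetical sketch only (not part of the committed role docs): the play layout, host group, and ignition file path below are assumptions, while the role name, task file, and `ignition_file` variable come from the role's tasks/config.yml shown later in this diff.

```
---
# Hypothetical usage sketch; host group and file path are illustrative.
- hosts: new_workers
  become: true
  tasks:
  - name: Configure the node from a local ignition file
    include_role:
      name: openshift_node40
      tasks_from: config.yml
    vars:
      ignition_file: /path/to/worker.ign   # assumed path, not from the role
```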

+ 160 - 0
roles/openshift_node40/defaults/main.yml

@@ -0,0 +1,160 @@
+---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+openshift_node_iptables_sync_period: '30s'
+osn_storage_plugin_deps:
+- ceph
+- glusterfs
+- iscsi
+openshift_node_local_quota_per_fsgroup: ""
+openshift_node_proxy_mode: iptables
+openshift_set_node_ip: False
+openshift_config_base: '/etc/origin'
+
+
+# Assume the images are already downloaded on the machine
+system_images_registry: "docker"
+l_osn_image: "{{ (system_images_registry == 'docker') | ternary(osn_image, (osn_image.split('/')|length==2) | ternary(system_images_registry + '/' + osn_image, osn_image)) }}"
+system_osn_image: "{{ (system_images_registry == 'docker') | ternary('docker:' + l_osn_image, l_osn_image) }}"
+
+openshift_node_env_vars: {}
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's dns service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
+
+# dnsmasq defaults to neg caching disabled
+openshift_node_dnsmasq_no_negcache: true
+# When openshift_node_dnsmasq_no_negcache is set to false, how many seconds to cache negative lookups.
+openshift_node_dnsmasq_neg_ttl: '1'
+
+r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_node_syscon_auth_mounts_l:
+- type: bind
+  source: "{{ oreg_auth_credentials_path }}"
+  destination: "/root/.docker"
+  options:
+  - ro
+  - bind
+
+# If we need to add new mounts in the future, or the user wants to mount data,
+# the entries should be in the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+default_r_openshift_node_image_prep_packages:
+- "{{ openshift_service_type }}-node"
+- ansible
+- bash-completion
+- dnsmasq
+- ntp
+- logrotate
+- httpd-tools
+- bind-utils
+- firewalld
+- libselinux-python
+- conntrack-tools
+- openssl
+- iproute
+- python-dbus
+- PyYAML
+- yum-utils
+- glusterfs-fuse
+- device-mapper-multipath
+- nfs-utils
+- cockpit-ws
+- cockpit-system
+- cockpit-bridge
+- cockpit-docker
+- iscsi-initiator-utils
+- ceph-common
+- atomic
+r_openshift_node_image_prep_packages: "{{ default_r_openshift_node_image_prep_packages | union(openshift_node_image_prep_packages | default([])) }}"
+
+r_openshift_node_os_firewall_deny: []
+default_r_openshift_node_os_firewall_allow:
+- service: Kubernetes kubelet
+  port: 10250/tcp
+- service: Kubernetes kube-proxy health check for service load balancers
+  port: 10256/tcp
+- service: http
+  port: 80/tcp
+- service: https
+  port: 443/tcp
+- service: OpenShift OVS sdn
+  port: 4789/udp
+  cond: openshift_use_openshift_sdn | bool
+- service: Calico BGP Port
+  port: 179/tcp
+  cond: "{{ openshift_node_use_calico }}"
+- service: Kubernetes service NodePort TCP
+  port: "{{ openshift_node_port_range | default('') }}/tcp"
+  cond: "{{ openshift_node_port_range is defined }}"
+- service: Kubernetes service NodePort UDP
+  port: "{{ openshift_node_port_range | default('') }}/udp"
+  cond: "{{ openshift_node_port_range is defined }}"
+- service: Prometheus monitoring
+  port: 9000-10000/tcp
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
+
+# oreg_url is defined by user input
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
+l_bind_docker_reg_auth: False
+
+openshift_docker_service_name: "docker"
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+# NOTE
+# r_openshift_node_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}"
+
+openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name }}"
+openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}"
+
+openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+openshift_node_use_calico: "{{ openshift_node_use_calico_default }}"
+
+openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
+
+openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+openshift_node_config_dir_default: "/etc/origin/node"
+openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
+
+openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
+
+
+openshift_node_use_instance_profiles: False
+
+openshift_node_use_persistentlocalvolumes: "{{ openshift_persistentlocalstorage_enabled | default(False) | bool }}"
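
The defaults above union user-supplied lists into the role defaults (openshift_node_open_ports into the firewall allow list, openshift_node_syscon_add_mounts_l into the system-container mounts). As a hedged sketch of what the corresponding group_vars entries could look like, the entry keys mirror the default lists above, but the specific service names, ports, and paths are invented for illustration:

```
# Illustrative group_vars entries; keys follow the default list formats above,
# the actual ports and mount paths are made up.
openshift_node_open_ports:
- service: example ingress health check
  port: 1936/tcp
- service: example app NodePort range
  port: 32000-32767/tcp

openshift_node_syscon_add_mounts_l:
- type: bind
  source: /srv/example-data
  destination: /var/lib/example-data
  options:
  - rw
  - bind
```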

+ 24 - 0
roles/openshift_node40/files/clean-up-crio-pods.sh

@@ -0,0 +1,24 @@
+#!/bin/bash
+for c in $(runc list -q); do
+        output=$(runc state $c | grep io.kubernetes.cri-o.ContainerType)
+        if [[ "$output" =~ "container" ]]; then
+                runc delete -f $c
+        fi
+        for m in $(mount | grep $c | awk '{print $3}'); do
+                umount -R $m
+        done
+done
+for c in $(runc list -q); do
+        output=$(runc state $c | grep io.kubernetes.cri-o.ContainerType)
+        if [[ "$output" =~ "sandbox" ]]; then
+                runc delete -f $c
+        fi
+        for m in $(mount | grep $c | awk '{print $3}'); do
+                umount -R $m
+        done
+done
+mount | grep overlay | awk '{print $3}' | xargs umount | true
+umount -R /var/lib/containers/storage/overlay
+umount -R /var/lib/containers/storage
+rm -rf /var/run/containers/storage/*
+rm -rf /var/lib/containers/storage/*

+ 128 - 0
roles/openshift_node40/files/networkmanager/99-origin-dns.sh

@@ -0,0 +1,128 @@
+#!/bin/bash -x
+# -*- mode: sh; sh-indentation: 2 -*-
+
+# This NetworkManager dispatcher script replicates the functionality of
+# NetworkManager's dns=dnsmasq; however, rather than hardcoding the listening
+# address and /etc/resolv.conf to 127.0.0.1 it pulls the IP address from the
+# interface that owns the default route. This enables us to then configure pods
+# to use this IP address as their only resolver, whereas using 127.0.0.1 inside
+# a pod would fail.
+#
+# To use this,
+# - If this host is also a master, reconfigure master dnsConfig to listen on
+#   8053 to avoid conflicts on port 53 and open port 8053 in the firewall
+# - Drop this script in /etc/NetworkManager/dispatcher.d/
+# - systemctl restart NetworkManager
+#
+# Test it:
+# host kubernetes.default.svc.cluster.local
+# host google.com
+#
+# TODO: I think this would be easy to add as a config option in NetworkManager
+# natively, look at hacking that up
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
+  # If the origin-upstream-dns config file changed we need to restart
+  NEEDS_RESTART=0
+  UPSTREAM_DNS='/etc/dnsmasq.d/origin-upstream-dns.conf'
+  # We'll regenerate the dnsmasq origin config in a temp file first
+  UPSTREAM_DNS_TMP=`mktemp`
+  UPSTREAM_DNS_TMP_SORTED=`mktemp`
+  CURRENT_UPSTREAM_DNS_SORTED=`mktemp`
+  NEW_RESOLV_CONF=`mktemp`
+  NEW_NODE_RESOLV_CONF=`mktemp`
+
+
+  ######################################################################
+  # couldn't find an existing method to determine if the interface owns the
+  # default route
+  def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')
+  def_route_int=$(/sbin/ip route get to ${def_route} | awk -F 'dev' '{print $2}' | head -n1 | awk '{print $1}')
+  def_route_ip=$(/sbin/ip route get to ${def_route}  | awk -F 'src' '{print $2}' | head -n1 | awk '{print $1}')
+  if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
+    if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
+      cat << EOF > /etc/dnsmasq.d/origin-dns.conf
+no-resolv
+domain-needed
+server=/cluster.local/172.30.0.1
+server=/30.172.in-addr.arpa/172.30.0.1
+enable-dbus
+dns-forward-max=5000
+cache-size=5000
+min-port=1024
+EOF
+      # New config file, must restart
+      NEEDS_RESTART=1
+    fi
+
+    # If network manager doesn't know about the nameservers then the best
+    # we can do is grab them from /etc/resolv.conf but only if we've got no
+    # watermark
+    if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+      if [[ -z "${IP4_NAMESERVERS}" || "${IP4_NAMESERVERS}" == "${def_route_ip}" ]]; then
+            IP4_NAMESERVERS=`grep '^nameserver[[:blank:]]' /etc/resolv.conf | awk '{ print $2 }'`
+      fi
+      ######################################################################
+      # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
+      # and /etc/origin/node/resolv.conf in their respective formats
+      for ns in ${IP4_NAMESERVERS}; do
+        if [[ ! -z $ns ]]; then
+          echo "server=${ns}" >> $UPSTREAM_DNS_TMP
+          echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
+        fi
+      done
+      # Sort it in case DNS servers arrived in a different order
+      sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
+      sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED
+      # Compare to the current config file (sorted)
+      NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
+      CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
+      if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
+        # DNS has changed, copy the temp file to the proper location (-Z
+        # sets default selinux context) and set the restart flag
+        cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS
+        NEEDS_RESTART=1
+      fi
+      # compare /etc/origin/node/resolv.conf checksum and replace it if different
+      NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}`
+      OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf`
+      if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
+        cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
+      fi
+    fi
+
+    if ! `systemctl -q is-active dnsmasq.service`; then
+      NEEDS_RESTART=1
+    fi
+
+    ######################################################################
+    if [ "${NEEDS_RESTART}" -eq "1" ]; then
+      systemctl restart dnsmasq
+    fi
+
+    # Only if dnsmasq is running properly make it our only nameserver and place
+    # a watermark on /etc/resolv.conf
+    if `systemctl -q is-active dnsmasq.service`; then
+      if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+          echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
+      fi
+      sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
+      echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
+      if ! grep -qw search ${NEW_RESOLV_CONF}; then
+        echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
+      elif ! grep -q 'search cluster.local' ${NEW_RESOLV_CONF}; then
+        # cluster.local should be in first three DNS names so that glibc resolver would work
+        sed -i -e 's/^search[[:blank:]]\(.\+\)\( cluster\.local\)\{0,1\}$/search cluster.local \1/' ${NEW_RESOLV_CONF}
+      fi
+      cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
+    fi
+  fi
+
+  # Clean up after yourself
+  rm -f $UPSTREAM_DNS_TMP $UPSTREAM_DNS_TMP_SORTED $CURRENT_UPSTREAM_DNS_SORTED $NEW_RESOLV_CONF
+fi

+ 18 - 0
roles/openshift_node40/files/openshift-node

@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# This launches the Kubelet by converting the node configuration into kube flags.
+
+set -euo pipefail
+
+if ! [[ -f /etc/origin/node/client-ca.crt ]]; then
+  if [[ -f /etc/origin/node/bootstrap.kubeconfig ]]; then
+    oc config --config=/etc/origin/node/bootstrap.kubeconfig view --raw --minify -o go-template='{{ index .clusters 0 "cluster" "certificate-authority-data" }}' | base64 -d - > /etc/origin/node/client-ca.crt
+  fi
+fi
+config=/etc/origin/node/bootstrap-node-config.yaml
+# TODO: remove when dynamic kubelet config is delivered
+if [[ -f /etc/origin/node/node-config.yaml ]]; then
+  config=/etc/origin/node/node-config.yaml
+fi
+flags=$( /usr/bin/openshift-node-config "--config=${config}" )
+eval "exec /usr/bin/hyperkube kubelet --v=${DEBUG_LOGLEVEL:-2} ${flags}"

+ 20 - 0
roles/openshift_node40/handlers/main.yml

@@ -0,0 +1,20 @@
+---
+- name: reload systemd units
+  command: systemctl daemon-reload
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
+
+- name: restart NetworkManager
+  systemd:
+    name: NetworkManager
+    state: restarted
+    enabled: True
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
+
+- name: restart dnsmasq
+  systemd:
+    name: dnsmasq
+    state: restarted
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)

+ 17 - 0
roles/openshift_node40/meta/main.yml

@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  author: Jhon Honce
+  description: OpenShift Node
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.1
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_facts

+ 32 - 0
roles/openshift_node40/tasks/bootkube_config.yml

@@ -0,0 +1,32 @@
+---
+- name: parse ignition file
+  parse_ignition:
+    ign_file_contents: "{{ openshift_bootkube_ign_contents }}"
+  register: l_parse_ignition_boot_kube_res
+
+- import_tasks: create_files_from_ignition.yml
+  vars:
+    l_parse_ignition_dict: "{{ l_parse_ignition_boot_kube_res }}"
+
+#### Disable SWAP #####
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+# swapoff is a custom module in lib_utils that comments out swap entries in
+# /etc/fstab and runs swapoff -a, if necessary.
+- name: Disable swap
+  swapoff: {}
+  when: openshift_disable_swap | default(true) | bool
+
+# The atomic-openshift-node service will set this parameter on
+# startup, but if the network service is restarted this setting is
+# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
+- sysctl:
+    name: net.ipv4.ip_forward
+    value: 1
+    sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+    reload: yes
+
+- name: Setting sebool container_manage_cgroup
+  seboolean:
+    name: container_manage_cgroup
+    state: yes
+    persistent: yes

+ 49 - 0
roles/openshift_node40/tasks/config.yml

@@ -0,0 +1,49 @@
+---
+- name: get worker ignition file
+  command: >
+    curl -k {{ openshift_bootstrap_endpoint }}
+  register: l_worker_bootstrap
+  when: openshift_bootstrap_endpoint is defined
+
+- set_fact:
+    ign_contents: "{{ l_worker_bootstrap.stdout }}"
+  when: openshift_bootstrap_endpoint is defined
+
+- set_fact:
+    ign_contents: "{{ lookup('file', ignition_file) }}"
+  when: ignition_file is defined
+
+- debug:
+    var: ign_contents
+
+- name: parse ignition file
+  parse_ignition:
+    ign_file_contents: "{{ ign_contents }}"
+  register: l_parse_ignition_res
+
+- import_tasks: create_files_from_ignition.yml
+  vars:
+    l_parse_ignition_dict: "{{ l_parse_ignition_res }}"
+
+#### Disable SWAP #####
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+# swapoff is a custom module in lib_utils that comments out swap entries in
+# /etc/fstab and runs swapoff -a, if necessary.
+- name: Disable swap
+  swapoff: {}
+  when: openshift_disable_swap | default(true) | bool
+
+# The atomic-openshift-node service will set this parameter on
+# startup, but if the network service is restarted this setting is
+# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
+- sysctl:
+    name: net.ipv4.ip_forward
+    value: 1
+    sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+    reload: yes
+
+- name: Setting sebool container_manage_cgroup
+  seboolean:
+    name: container_manage_cgroup
+    state: yes
+    persistent: yes

+ 16 - 0
roles/openshift_node40/tasks/create_files_from_ignition.yml

@@ -0,0 +1,16 @@
+---
+- name: Create all the directories we will need
+  file:
+    path: "{{ item }}"
+    state: directory
+  with_items: "{{ l_parse_ignition_dict.dir_list }}"
+
+- name: create files from ignition contents
+  copy:
+    content: "{{ item.value.contents }}"
+    dest: "{{ item.key }}"
+    mode: "{{ l_file_mode }}"
+  with_dict: "{{ l_parse_ignition_dict.files_dict }}"
+  vars:
+    l_mode_prepend: "{{ '0' if (item.value.mode | length < 4) else '' }}"
+    l_file_mode: "{{ l_mode_prepend ~ item.value.mode }}"

+ 15 - 0
roles/openshift_node40/tasks/install.yml

@@ -0,0 +1,15 @@
+---
+
+- name: Install openshift packages
+  package:
+    name: "{{ l_node_packages | join(',') }}"
+    update_cache: true
+  register: install_openshift
+  until: install_openshift.rc == 0
+  retries: 3
+  delay: 1
+  vars:
+    l_node_packages:
+    - "origin-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+    - "origin-clients{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+    - "origin-hyperkube{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"

+ 15 - 0
roles/openshift_node40/tasks/systemd.yml

@@ -0,0 +1,15 @@
+---
+
+- name: daemon reload
+  systemd:
+    daemon_reload: yes
+
+# dictionary of kv pairs, servicename: enabled, eg:
+# {'kubernetes': "true"}
+- name: Start and enable services
+  systemd:
+    name: "{{ item.key }}"
+    state: "{{ 'restarted' if (item.value | bool) else 'stopped' }}"
+    enabled: "{{ item.value | bool }}"
+  with_dict: "{{ l_parse_ignition_res.systemd_dict }}"
+  when: item.key not in excluded_services | default([])
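
The task files above (config.yml, create_files_from_ignition.yml, and systemd.yml) all consume the structure registered by the parse_ignition plugin. The exact output is defined by that plugin, not here; as a rough, assumed sketch based only on the fields these tasks reference (dir_list, files_dict with contents/mode, and systemd_dict), the registered result is shaped roughly like:

```
# Assumed shape, inferred from the tasks above; the example values are invented.
l_parse_ignition_res:
  dir_list:
  - /etc/origin/node
  files_dict:
    /etc/origin/node/bootstrap-node-config.yaml:
      contents: "...example file contents..."
      mode: "644"        # prefixed with '0' by create_files_from_ignition.yml
  systemd_dict:
    kubernetes: "true"   # service name -> enabled flag, as in systemd.yml
```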

+ 69 - 0
roles/openshift_node40/templates/bootstrap.yml.j2

@@ -0,0 +1,69 @@
+{% raw -%}
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+  gather_facts: yes
+  vars:
+    origin_dns:
+      file: /etc/dnsmasq.d/origin-dns.conf
+      lines:
+      - regex: ^listen-address
+        state: present
+        line: "listen-address={{ ansible_default_ipv4.address }}"
+
+  tasks:
+  - include_vars: openshift_settings.yaml
+
+  - name: set the data for origin_dns
+    lineinfile:
+      create: yes
+      state: "{{ item.state | default('present') }}"
+      insertafter: "{{ item.after | default(omit) }}"
+      path: "{{ origin_dns.file }}"
+      regexp: "{{ item.regex }}"
+      line: "{{ item.line | default(omit)}}"
+    with_items: "{{ origin_dns.lines }}"
+
+  - when:
+    - openshift_node_config_name is defined
+    - openshift_node_config_name != ''
+    block:
+    - name: determine the openshift_service_type
+      stat:
+        path: /etc/sysconfig/atomic-openshift-node
+        get_checksum: false
+        get_attributes: false
+        get_mime: false
+      register: service_type_results
+
+    - name: set openshift_service_type fact based on stat results
+      set_fact:
+        openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}"
+
+    - name: update the sysconfig to have necessary variables
+      lineinfile:
+        dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+        line: "{{ item.line }}"
+        regexp: "{{ item.regexp }}"
+      with_items:
+      - line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_config_name }}"
+        regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
+{% endraw %}
+
+{% if openshift_cloudprovider_kind | default('') == 'aws' %}
+  # need to update aws.conf file if the instance has come up in a new region
+  - name: set up aws.conf
+    block:
+    - name: get current AZ
+      uri:
+        url: http://169.254.169.254/latest/meta-data/placement/availability-zone
+        return_content: yes
+      register: aws_out
+
+    - name: set AZ in aws.conf
+      ini_file:
+        path: /etc/origin/cloudprovider/aws.conf
+        section: Global
+        option: Zone
+        value: "{% raw %}{{ aws_out.content }}{% endraw %}"
+{% endif %}

+ 20 - 0
roles/openshift_node40/templates/multipath.conf.j2

@@ -0,0 +1,20 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+        device {
+                vendor "LIO-ORG"
+                user_friendly_names "yes" 
+                path_grouping_policy "failover"
+                path_selector "round-robin 0"
+                failback immediate
+                path_checker "tur"
+                prio "alua"
+                no_path_retry 120
+                rr_weight "uniform"
+        }
+}
+defaults {
+	user_friendly_names yes
+	find_multipaths yes
+}
+

+ 26 - 0
roles/openshift_node40/templates/node.service.j2

@@ -0,0 +1,26 @@
+[Unit]
+Description=OpenShift Node
+After={{ openshift_docker_service_name }}.service
+After=chronyd.service
+After=ntpd.service
+Wants={{ openshift_docker_service_name }}.service
+Documentation=https://github.com/openshift/origin
+Wants=dnsmasq.service
+After=dnsmasq.service
+{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %}
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+ExecStart=/usr/local/bin/openshift-node
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier={{ openshift_service_type }}-node
+Restart=always
+RestartSec=5s
+TimeoutStartSec=300
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target

+ 17 - 0
roles/openshift_node40/templates/origin-dns.conf.j2

@@ -0,0 +1,17 @@
+no-resolv
+domain-needed
+{% if openshift_node_dnsmasq_no_negcache %}
+no-negcache
+{% else %}
+neg-ttl={{ openshift_node_dnsmasq_neg_ttl }}
+{% endif %}
+max-cache-ttl=1
+enable-dbus
+dns-forward-max=10000
+cache-size=10000
+bind-dynamic
+min-port=1024
+{% for interface in openshift_node_dnsmasq_except_interfaces %}
+except-interface={{ interface }}
+{% endfor %}
+# End of config

+ 1 - 11
roles/openshift_version/tasks/first_master.yml

@@ -1,15 +1,5 @@
 ---
-# Determine the openshift_version to configure if none has been specified or set previously.
-
-# Protect the installed version by default unless explicitly told not to, or given an
-# openshift_version already.
-- name: Use openshift_current_version fact as version to configure if already installed
-  set_fact:
-    openshift_version: "{{ openshift_current_version }}"
-  when:
-  - openshift_current_version is defined
-  - openshift_version is not defined or openshift_version == ""
-  - openshift_protect_installed_version | bool
+# Determine the openshift_version
 
 - name: Set openshift_version to openshift_release if undefined
   set_fact:

+ 0 - 7
test/ci/README.md

@@ -1,7 +0,0 @@
-* Copy `test/ci/vars.yml.sample` to `test/ci/vars.yml`
-* Adjust it your liking - this would be the host configuration
-* Adjust `inventory/group_vars/OSEv3/vars.yml` - this would be Origin-specific config
-* Provision instances via `ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml`
-  This would place inventory file in `test/ci/inventory/hosts` and run prerequisites and deploy.
-
-* Once the setup is complete run `ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml`

+ 0 - 45
test/ci/deprovision.yml

@@ -1,45 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: Gather ec2 facts
-      ec2_instance_facts:
-        region: "{{ aws_region }}"
-        filters:
-          tag-key: "kubernetes.io/cluster/{{ aws_cluster_id }}"
-      register: ec2
-
-    - name: Terminate instances
-      ec2:
-        instance_ids: "{{ item.instance_id }}"
-        region: "{{ aws_region }}"
-        state: absent
-        wait: no
-      with_items: "{{ ec2.instances }}"
-      when: not aws_use_auto_terminator | default(true)
-
-    - when: aws_use_auto_terminator | default(true)
-      block:
-        - name: Stop VMs
-          ec2:
-            instance_ids: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            state: stopped
-            wait: no
-          with_items: "{{ ec2.instances }}"
-          ignore_errors: true
-
-        - name: Rename VMs
-          ec2_tag:
-            resource: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            tags:
-              Name: "{{ item.tags.Name }}-terminate"
-          when: "'-terminate' not in item.tags.Name"
-          with_items: "{{ ec2.instances }}"

+ 0 - 113
test/ci/inventory/group_vars/OSEv3/vars.yml

@@ -1,113 +0,0 @@
----
-ansible_become: true
-ansible_become_sudo: true
-
-openshift_deployment_type: origin
-openshift_repos_enable_testing: false
-
-#Minimal set of services
-openshift_web_console_install: true
-openshift_console_install: true
-openshift_metrics_install_metrics: false
-openshift_metrics_install_logging: false
-openshift_logging_install_logging: false
-openshift_management_install_management: false
-template_service_broker_install: false
-ansible_service_broker_install: false
-openshift_enable_service_catalog: false
-osm_use_cockpit: false
-openshift_monitoring_deploy: false
-openshift_metering_install: false
-openshift_metrics_server_install: false
-openshift_monitor_availability_install: false
-openshift_enable_olm: false
-openshift_descheduler_install: false
-openshift_node_problem_detector_install: false
-openshift_autoheal_deploy: false
-openshift_cluster_autoscaler_install: false
-
-# debugging
-debug_level: 4
-etcd_debug: true
-etcd_log_package_levels: 'auth=INFO,etcdmain=DEBUG,etcdserver=DEBUG'
-openshift_docker_options: "--log-driver=journald"
-
-#Disable journald persistence
-journald_vars_to_replace:
-  - { var: Storage, val: volatile }
-  - { var: Compress, val: no }
-  - { var: SyncIntervalSec, val: 1s }
-  - { var: RateLimitInterval, val: 1s }
-  - { var: RateLimitBurst, val: 10000 }
-  - { var: SystemMaxUse, val: 8G }
-  - { var: SystemKeepFree, val: 20% }
-  - { var: SystemMaxFileSize, val: 10M }
-  - { var: MaxRetentionSec, val: 1month }
-  - { var: MaxFileSec, val: 1day }
-  - { var: ForwardToSyslog, val: no }
-  - { var: ForwardToWall, val: no }
-
-#Other settings
-openshift_enable_origin_repo: false
-osm_default_node_selector: "node-role.kubernetes.io/compute=true"
-openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
-openshift_logging_es_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-openshift_logging_es_ops_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-osm_controller_args:
-  enable-hostpath-provisioner:
-    - "true"
-openshift_hosted_router_create_certificate: true
-openshift_master_audit_config:
-  enabled: true
-openshift_master_identity_providers:
-  - name: "allow_all"
-    login: "true"
-    challenge: "true"
-    kind: "AllowAllPasswordIdentityProvider"
-openshift_template_service_broker_namespaces:
-  - "openshift"
-enable_excluders: "true"
-osm_cluster_network_cidr: "10.128.0.0/14"
-openshift_portal_net: "172.30.0.0/16"
-osm_host_subnet_length: 9
-openshift_check_min_host_disk_gb: 1.5
-openshift_check_min_host_memory_gb: 1.9
-openshift_disable_check: package_update,package_availability,memory_availability,disk_availability
-
-openshift_logging_use_mux: false
-openshift_logging_use_ops: true
-openshift_logging_es_log_appenders:
-  - "console"
-openshift_logging_fluentd_journal_read_from_head: false
-openshift_logging_fluentd_audit_container_engine: true
-
-openshift_logging_curator_cpu_request: "100m"
-openshift_logging_curator_memory_limit: "32Mi"
-openshift_logging_curator_ops_cpu_request: "100m"
-openshift_logging_curator_ops_memory_limit: "32Mi"
-openshift_logging_elasticsearch_proxy_cpu_request: "100m"
-openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
-openshift_logging_es_cpu_request: "400m"
-openshift_logging_es_memory_limit: "4Gi"
-openshift_logging_es_ops_cpu_request: "400m"
-openshift_logging_es_ops_memory_limit: "4Gi"
-openshift_logging_eventrouter_cpu_request: "100m"
-openshift_logging_eventrouter_memory_limit: "64Mi"
-openshift_logging_fluentd_cpu_request: "100m"
-openshift_logging_fluentd_memory_limit: "256Mi"
-openshift_logging_kibana_cpu_request: "100m"
-openshift_logging_kibana_memory_limit: "128Mi"
-openshift_logging_kibana_ops_cpu_request: "100m"
-openshift_logging_kibana_ops_memory_limit: "128Mi"
-openshift_logging_kibana_ops_proxy_cpu_request: "100m"
-openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
-openshift_logging_kibana_proxy_cpu_request: "100m"
-openshift_logging_kibana_proxy_memory_limit: "64Mi"
-openshift_logging_mux_cpu_request: "400m"
-openshift_logging_mux_memory_limit: "256Mi"
-
-openshift_master_cluster_method: native
-
-openshift_node_port_range: '30000-32000'

+ 0 - 112
test/ci/launch.yml

@@ -1,112 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: list available AMIs
-      ec2_ami_facts:
-        region: "{{ aws_region }}"
-        filters: "{{ aws_ami_tags }}"
-      register: ami_facts
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: Create EC2 instance
-      ec2:
-        region: "{{ aws_region }}"
-        key_name: "{{ aws_key }}"
-        instance_type: "{{ item.aws_flavor }}"
-        image: "{{ item.aws_image | default(aws_image) }}"
-        wait: yes
-        group: "{{ item.aws_security_group }}"
-        count: 1
-        vpc_subnet_id: "{{ aws_subnet }}"
-        assign_public_ip: yes
-        instance_tags: "{{ aws_instance_tags }}"
-        volumes: "{{ item.aws_volumes | default(omit) }}"
-      register: ec2
-      with_items: "{{ aws_instances }}"
-      vars:
-        aws_instance_tags: |
-          {
-            "kubernetes.io/cluster/{{ aws_cluster_id }}": "true",
-            "Name": "{{ item.name }}",
-            "ansible-groups": "{{ item.ansible_groups | join(',') }}",
-            "ansible-node-group": "{{ item.node_group }}",
-            "expirationDate": "{{ item.aws_expiration_date | default(aws_expiration_date) }}"
-          }
-
-    - name: Add machine to inventory
-      add_host:
-        name: "{{ item.instances.0.tags['Name'] }}"
-        ansible_host: "{{ item.instances.0.dns_name }}"
-        ansible_user: "{{ item.instances.0.aws_user | default(aws_user)}}"
-        groups: "{{ item.instances.0.tags['ansible-groups'].split(',') }}"
-        aws_region: "{{ aws_region }}"
-        aws_ip: "{{ item.instances.0.public_ip }}"
-        aws_id: "{{ item.instances.0.id }}"
-        openshift_node_group_name: "{{ item.instances.0.tags['ansible-node-group'] }}"
-      with_items: "{{ ec2.results }}"
-
-    - name: write the inventory
-      template:
-        src: ./template-inventory.j2
-        dest: "inventory/hosts"
-
-    - name: Refresh inventory to ensure new instances exist in inventory
-      meta: refresh_inventory
-
-- hosts: all
-  gather_facts: no
-  become: true
-  tasks:
-    - wait_for_connection: {}
-    - name: Make sure hostname is set to public ansible host
-      hostname:
-        name: "{{ ansible_host }}"
-    - name: Detecting Operating System
-      shell: ls /run/ostree-booted
-      ignore_errors: yes
-      failed_when: false
-      register: ostree_output
-    - name: Update all packages
-      package:
-        name: '*'
-        state: latest
-      when: ostree_output.rc != 0
-      register: yum_update
-    - name: Update Atomic system
-      command: atomic host upgrade
-      when: ostree_output.rc == 0
-      register: ostree_update
-    - name: Reboot machines
-      shell: sleep 5 && systemctl reboot
-      async: 1
-      poll: 0
-      ignore_errors: true
-      when: yum_update | changed or ostree_update | changed
-    - name: Wait for connection
-      wait_for_connection:
-        connect_timeout: 20
-        sleep: 5
-        delay: 5
-        timeout: 300
-    - setup: {}
-
-- import_playbook: ../../playbooks/openshift-node/network_manager.yml
-- import_playbook: ../../playbooks/prerequisites.yml
-- import_playbook: ../../playbooks/deploy_cluster.yml

+ 0 - 26
test/ci/template-inventory.j2

@@ -1,26 +0,0 @@
-[OSEv3:vars]
-ansible_python_interpreter="{{ python }}"
-ansible_user="{{ aws_user }}"
-aws_region="{{ aws_region }}"
-openshift_master_default_subdomain="{{ hostvars[groups[('lb' in groups) | ternary('lb', 'masters')][0]]["aws_ip"] }}.xip.io"
-
-[OSEv3:children]
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-{{group}}
-{% endif %}
-{% endfor %}
-
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-[{{group}}]
-{% for entry in groups[group] %}
-{% set addon_opts = "" %}
-{% if group == "nodes" %}
-{% set addon_opts = addon_opts + " openshift_node_group_name='" + hostvars[entry]['openshift_node_group_name'] + "'" %}
-{% endif %}
-{{ entry }} ansible_host='{{ hostvars[entry]['ansible_host'] }}' aws_id='{{ hostvars[entry]['aws_id'] }}' {{ addon_opts }}
-{% endfor %}
-{% endif %}
-
-{% endfor %}

+ 0 - 46
test/ci/vars.yml.sample

@@ -1,46 +0,0 @@
----
-vm_prefix: "ci_test"
-#aws_use_auto_terminator is set to True by default, as rh-dev account doesn't have permission
-# to terminate instances. These should be stopped and renamed to include 'terminate' instead
-#aws_use_auto_terminator: false
-
-type: aws
-aws_user: "ec2-user"
-python: "/usr/bin/python"
-
-aws_key: "libra"
-aws_region: "us-east-1"
-aws_cluster_id: "ci"
-# us-east-1d
-aws_subnet: "subnet-cf57c596"
-
-aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
-
-aws_ami_tags:
-  "tag:operating_system": "rhel"
-  "tag:image_stage": "base"
-  "tag:ready": "yes"
-
-aws_instances:
-- name: "{{ vm_prefix }}-master"
-  ansible_groups:
-    - masters
-    - etcd
-    - nodes
-  aws_flavor: t2.large
-  aws_security_group: public
-  node_group: "node-config-all-in-one"
-  # Use custom AMI tags
-  # aws_ami_tags:
-  #   operating_system: "rhel"
-  #   image_stage: "base"
-  #   ready: "yes"
-  # Use custom AMI
-  #aws_image: "ami-70e8fd66"
-  # Attach custom volumes
-  #aws_volumes:
-  # - device_name: /dev/sdb
-  #   volume_size: 50
-  #   delete_on_termination: yes
-  #Set expiration date for instances on CI namespace
-  #aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"

+ 20 - 13
playbooks/gcp/openshift-cluster/build_image.yml

@@ -24,10 +24,6 @@
   connection: local
   gather_facts: no
   tasks:
-  - name: Set facts
-    set_fact:
-      openshift_master_unsupported_embedded_etcd: True
-
   - name: Create the image instance disk
     gce_pd:
       service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
@@ -79,16 +75,12 @@
 
 - name: Add custom repositories
   hosts: nodes
-  handlers:
-  - import_tasks: ../../roles/openshift_repos/handlers/main.yml
-  tasks:
-  - include_role:
-      name: openshift_gcp
-      tasks_from: add_custom_repositories.yml
+  roles:
+  - role: openshift_repos
 
-# This is the part that installs all of the software and configs for the instance
-# to become a node.
-- import_playbook: ../../openshift-node/private/image_prep.yml
+- import_playbook: ../../playbooks/init/base_packages.yml
+  vars:
+    l_base_packages_hosts: nodes
 
 # Add additional GCP specific behavior
 - hosts: nodes
@@ -99,6 +91,21 @@
   - include_role:
       name: openshift_gcp
       tasks_from: frequent_log_rotation.yml
+  - name: Install networkmanager-glib to reset MTU
+    package:
+      name: NetworkManager-glib
+      state: present
+  - name: Set MTU
+    nmcli:
+      conn_name: "System eth0"
+      mtu: "{{ openshift_node_sdn_mtu }}"
+      type: ethernet
+      state: present
+  #Required for storage tests to mount NFS shares
+  - name: Install packages for tests
+    package:
+      name: "nfs-utils"
+      state: present
 
 - name: Commit image
   hosts: localhost

+ 1 - 2
playbooks/gcp/openshift-cluster/deprovision.yml

@@ -6,5 +6,4 @@
   tasks:
   - include_role:
       name: openshift_gcp
-    vars:
-      state: absent
+      tasks_from: deprovision.yml

+ 29 - 0
test/gcp/install.yml

@@ -0,0 +1,29 @@
+# This playbook installs onto a provisioned cluster
+#TODO: split into parts: nodes.yml, bootstrap.yml, masters.yml, workers.yml, bootkube/post_setup.yml
+---
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: place all scale groups into Ansible groups
+    include_role:
+      name: openshift_gcp
+      tasks_from: setup_scale_group_facts.yml
+
+- hosts: nodes
+  tasks:
+  - name: Disable google hostname updater
+    file:
+      path: /etc/dhcp/dhclient.d/google_hostname.sh
+      mode: 0644
+
+- name: run the deploy_cluster_40
+  import_playbook: ../../playbooks/deploy_cluster_40.yml
+
+- name: destroy bootstrap node
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Scale down bootstrap node and update public API DNS record
+    include_role:
+      name: openshift_gcp
+      tasks_from: remove_bootstrap.yml

playbooks/gcp/openshift-cluster/inventory.yml → test/gcp/inventory.yml


playbooks/gcp/openshift-cluster/launch.yml → test/gcp/launch.yml


playbooks/gcp/openshift-cluster/provision.yml → test/gcp/provision.yml


+ 1 - 0
test/gcp/roles

@@ -0,0 +1 @@
+../../roles

playbooks/gcp/openshift-cluster/upgrade.yml → test/gcp/upgrade.yml