
Add test playbooks to verify new RHEL nodes can be added to AWS RHCOS cluster

Vadim Rutkovsky 6 years ago
parent
commit
e10cc17253
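
In brief (the invocation below is an assumption pieced together from the files in this commit, not a documented entry point): test/aws/scaleup.yml copies an existing worker MachineSet, swaps in a RHEL AMI and key pair, waits for the new machine, adds it to the new_workers group, runs playbooks/openshift-node/scaleup.yml against it, and finally cordons, drains and deletes the pre-existing RHCOS workers. The playbooks reference kubeconfig_path, openshift_aws_scaleup_key and openshift_aws_scaleup_ami, which would be supplied as extra vars, roughly:

    cd inventory/dynamic/aws
    ansible-playbook -vv ../../../test/aws/scaleup.yml \
      -e kubeconfig_path=$HOME/.kube/config \
      -e openshift_aws_scaleup_key=<ec2-key-pair-name> \
      -e openshift_aws_scaleup_ami=<rhel-ami-id>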

+ 47 - 0
inventory/dynamic/aws/ansible.cfg

@@ -0,0 +1,47 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+private_key_file = $HOME/.ssh/id_rsa
+
+# Additional default options for OpenShift Ansible
+forks = 50
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = ec2-user
+roles_path = ../../../roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+stdout_callback = yaml
+
+# Use the provided example inventory
+inventory = inventory
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r
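
Note on how this config takes effect (standard Ansible behaviour, not something added by this commit): Ansible reads ansible.cfg from the current working directory before falling back to ~/.ansible.cfg and /etc/ansible/ansible.cfg, so these defaults apply when playbooks are launched from inventory/dynamic/aws. Alternatively the file can be selected explicitly:

    ANSIBLE_CONFIG=inventory/dynamic/aws/ansible.cfg ansible-playbook -vv ...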

+ 3 - 0
inventory/dynamic/aws/group_vars/all/00_defaults.yml

@@ -0,0 +1,3 @@
+---
+ansible_become: yes
+openshift_deployment_type: origin

+ 0 - 0
inventory/dynamic/aws/inventory


+ 8 - 26
playbooks/openshift-node/scaleup.yml

@@ -1,6 +1,4 @@
 ---
-- import_playbook: ../init/evaluate_groups.yml
-
 - name: Ensure there are new_nodes
   hosts: localhost
   connection: local
@@ -8,32 +6,22 @@
   tasks:
   - fail:
       msg: >
-        Detected no new_nodes in inventory. Please add hosts to the
-        new_nodes host group to add nodes.
+        Detected no new_workers in inventory. Please add hosts to the
+        new_workers host group to add nodes.
     when:
-    - g_new_node_hosts | default([]) | length == 0
-  - fail:
-      msg: >
-        Please run playbooks/openshift-master/scaleup.yml if you need to
-        scale up both masters and nodes.  This playbook is only needed if
-        you are only adding new nodes and not new masters.
-    when:
-    - g_new_node_hosts | default([]) | length > 0
-    - g_new_master_hosts | default([]) | length > 0
-
-# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
-# g_new_node_hosts via evaluate_groups.yml
+    - groups.new_workers | default([]) | length == 0
 
 - name: run the init
   import_playbook: ../init/main.yml
   vars:
-    l_init_fact_hosts: "masters:new_nodes"
-    l_openshift_version_set_hosts: "new_nodes"
+    l_init_fact_hosts: "new_workers"
+    l_openshift_version_set_hosts: "new_workers"
     l_install_base_packages: True
-    l_repo_hosts: "new_nodes"
+    l_repo_hosts: "new_workers"
+    l_base_packages_hosts: "new_workers"
 
 - name: install nodes
-  hosts: new_nodes
+  hosts: new_workers
   vars:
     openshift_bootstrap_endpoint: "https://{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}:49500/config/worker"
   roles:
@@ -51,12 +39,6 @@
   - import_role:
       name: openshift_node40
       tasks_from: install.yml
-
-- name: Start workers
-  hosts: new_nodes
-  vars:
-    openshift_bootstrap_endpoint: "https://{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}:49500/config/worker"
-  tasks:
   - name: Wait for bootstrap endpoint to show up
     uri:
       url: "{{ openshift_bootstrap_endpoint }}"

+ 1 - 1
roles/openshift_facts/defaults/main.yml

@@ -49,7 +49,7 @@ openshift_use_crio: True
 openshift_use_crio_only: True
 openshift_crio_enable_docker_gc: False
 openshift_crio_var_sock: "/var/run/crio/crio.sock"
-openshift_crio_pause_image: "{{ l_os_registry_url | regex_replace('${component}' | regex_escape, 'pod') }}"
+openshift_crio_pause_image: "{{ l_os_registry_url | regex_replace('${component}' | regex_escape, 'pod') | regex_replace('${version}' | regex_escape, 'v4.0') }}"
 openshift_container_cli: "{{ openshift_use_crio | bool | ternary('crictl', 'docker') }}"
 openshift_crio_docker_gc_node_selector:
   runtime: 'cri-o'
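
For illustration only (the registry URL format is an assumed example, not taken from this repo): if l_os_registry_url were quay.io/openshift/origin-${component}:${version}, the previous default rendered as quay.io/openshift/origin-pod:${version}, leaving a literal ${version} placeholder in the pause image reference, while the new default resolves to quay.io/openshift/origin-pod:v4.0.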

+ 8 - 0
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -23,3 +23,11 @@
     - workers
     - nodes
   with_items: "{{ groups['tag_ocp-worker'] | default([]) }}"
+
+- name: Add new worker instances
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups:
+    - new_workers
+    - nodes
+  with_items: "{{ groups['tag_ocp-new-worker'] | default([]) }}"

+ 0 - 7
test/aws/README.md

@@ -1,7 +0,0 @@
-* Copy `test/ci/vars.yml.sample` to `test/ci/vars.yml`
-* Adjust it your liking - this would be the host configuration
-* Adjust `inventory/group_vars/OSEv3/vars.yml` - this would be Origin-specific config
-* Provision instances via `ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml`
-  This would place inventory file in `test/ci/inventory/hosts` and run prerequisites and deploy.
-
-* Once the setup is complete run `ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml`

+ 66 - 0
test/aws/create_machineset.yml

@@ -0,0 +1,66 @@
+---
+- name: save machineset to a file
+  copy:
+    content: "{{ item | to_yaml }}"
+    dest: "{{ mktemp.stdout }}/machineset.yaml"
+
+- name: get existing machineset name
+  yedit:
+    state: list
+    src: "{{ mktemp.stdout }}/machineset.yaml"
+    key: "metadata.name"
+  register: machineset_name
+
+- name: edit machineset name
+  yedit:
+    src: "{{ mktemp.stdout }}/machineset.yaml"
+    separator: '#'
+    state: present
+    edits:
+    - key: metadata#name
+      value: "{{ machineset_name.result }}-centos"
+    - key: spec#selector#matchLabels#sigs.k8s.io/cluster-api-machineset
+      value: "{{ machineset_name.result }}-centos"
+    - key: spec#template#metadata#labels#sigs.k8s.io/cluster-api-machineset
+      value: "{{ machineset_name.result }}-centos"
+    - key: spec#template#metadata#labels#sigs.k8s.io/cluster-api-machineset
+      value: "{{ machineset_name.result }}-centos"
+    - key: spec#template#spec#providerSpec#value#keyName
+      value: "{{ openshift_aws_scaleup_key }}"
+    - key: spec#template#spec#providerSpec#value#ami#id
+      value: "{{ openshift_aws_scaleup_ami }}"
+
+- name: import machinespec
+  oc_obj:
+    state: present
+    namespace: "openshift-cluster-api"
+    kind: machineset
+    name: "{{ machineset_name.result }}-centos"
+    kubeconfig: "{{ kubeconfig_path }}"
+    files:
+    - "{{ mktemp.stdout }}/machineset.yaml"
+
+- name: wait for machine to be created
+  oc_obj:
+    state: list
+    kind: machines
+    namespace: openshift-cluster-api
+    selector: "sigs.k8s.io/cluster-api-machineset={{ machineset_name.result }}-centos"
+    kubeconfig: "{{ kubeconfig_path }}"
+  register: new_machine
+  retries: 36
+  delay: 5
+  until:
+  - "'results' in new_machine"
+  - "'results' in new_machine.results"
+  - "new_machine.results.results | length > 0"
+  - "'items' in new_machine.results.results[0]"
+  - "new_machine.results.results[0]['items'] | length > 0"
+  - "'status' in new_machine.results.results[0]['items'][0]"
+
+- name: add machine to the inventory
+  add_host:
+    name: "{{ new_machine.results.results[0]['items'][0].status.addresses | selectattr('type', 'match', '^InternalIP$') | map(attribute='address') | first }}"
+    node_name: "{{ new_machine.results.results[0]['items'][0].status.addresses | selectattr('type', 'match', '^InternalDNS$') | map(attribute='address') | first }}"
+    groups: new_workers
+    ansible_ssh_common_args: '-o ProxyCommand="ssh -o IdentityFile="/opt/app-root/src/.ssh/id_rsa" -o StrictHostKeyChecking=no -W %h:%p -q core@{{ master_external_dns }}"'
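
The add_host above registers the new machine by its internal IP, so Ansible reaches it by tunnelling SSH through the first RHCOS master (master_external_dns, discovered in get_machinesets.yml) as the ec2-user configured in ansible.cfg. The equivalent manual connection would look roughly like this (hostnames are placeholders, not values from this repo):

    ssh -o ProxyCommand="ssh -W %h:%p -q core@<master-external-dns>" ec2-user@<new-node-internal-ip>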

+ 0 - 45
test/aws/deprovision.yml

@@ -1,45 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: Gather ec2 facts
-      ec2_instance_facts:
-        region: "{{ aws_region }}"
-        filters:
-          tag-key: "kubernetes.io/cluster/{{ aws_cluster_id }}"
-      register: ec2
-
-    - name: Terminate instances
-      ec2:
-        instance_ids: "{{ item.instance_id }}"
-        region: "{{ aws_region }}"
-        state: absent
-        wait: no
-      with_items: "{{ ec2.instances }}"
-      when: not aws_use_auto_terminator | default(true)
-
-    - when: aws_use_auto_terminator | default(true)
-      block:
-        - name: Stop VMs
-          ec2:
-            instance_ids: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            state: stopped
-            wait: no
-          with_items: "{{ ec2.instances }}"
-          ignore_errors: true
-
-        - name: Rename VMs
-          ec2_tag:
-            resource: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            tags:
-              Name: "{{ item.tags.Name }}-terminate"
-          when: "'-terminate' not in item.tags.Name"
-          with_items: "{{ ec2.instances }}"

+ 57 - 0
test/aws/get_machinesets.yml

@@ -0,0 +1,57 @@
+---
+- name: List existing workers
+  oc_obj:
+    kubeconfig: "{{ kubeconfig_path }}"
+    state: list
+    kind: node
+    selector: "node-role.kubernetes.io/worker"
+  delegate_to: localhost
+  register: pre_scaleup_workers
+  until:
+  - pre_scaleup_workers.results is defined
+  - pre_scaleup_workers.results.returncode is defined
+  - pre_scaleup_workers.results.results is defined
+  - pre_scaleup_workers.results.returncode == 0
+  - pre_scaleup_workers.results.results[0]['items'] | length > 0
+  retries: 36
+  delay: 5
+
+- set_fact:
+    pre_scaleup_workers_name: "{{ pre_scaleup_workers.results.results[0]['items'] |map(attribute='metadata.name') | list }}"
+
+- name: create temp directory
+  command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+  register: mktemp
+  changed_when: False
+
+- name: get existing worker machinesets
+  oc_obj:
+    state: list
+    kind: machinesets
+    namespace: openshift-cluster-api
+    selector: ""
+    kubeconfig: "{{ kubeconfig_path }}"
+  register: machineset
+
+- set_fact:
+    pre_scaleup_machineset_names: "{{ machineset.results.results[0]['items'] |map(attribute='metadata.name') | list }}"
+
+- name: List existing masters
+  oc_obj:
+    kubeconfig: "{{ kubeconfig_path }}"
+    state: list
+    kind: node
+    selector: "node-role.kubernetes.io/master"
+  delegate_to: localhost
+  register: masters
+  until:
+  - masters.results is defined
+  - masters.results.returncode is defined
+  - masters.results.results is defined
+  - masters.results.returncode == 0
+  retries: 36
+  delay: 5
+
+- name: save first master external DNS name
+  set_fact:
+    master_external_dns: "{{ masters.results.results[0]['items'][0].status.addresses | selectattr('type', 'match', '^ExternalDNS$') | map(attribute='address') | first }}"

+ 0 - 113
test/aws/inventory/group_vars/OSEv3/vars.yml

@@ -1,113 +0,0 @@
----
-ansible_become: true
-ansible_become_sudo: true
-
-openshift_deployment_type: origin
-openshift_repos_enable_testing: false
-
-#Minimal set of services
-openshift_web_console_install: true
-openshift_console_install: true
-openshift_metrics_install_metrics: false
-openshift_metrics_install_logging: false
-openshift_logging_install_logging: false
-openshift_management_install_management: false
-template_service_broker_install: false
-ansible_service_broker_install: false
-openshift_enable_service_catalog: false
-osm_use_cockpit: false
-openshift_monitoring_deploy: false
-openshift_metering_install: false
-openshift_metrics_server_install: false
-openshift_monitor_availability_install: false
-openshift_enable_olm: false
-openshift_descheduler_install: false
-openshift_node_problem_detector_install: false
-openshift_autoheal_deploy: false
-openshift_cluster_autoscaler_install: false
-
-# debugging
-debug_level: 4
-etcd_debug: true
-etcd_log_package_levels: 'auth=INFO,etcdmain=DEBUG,etcdserver=DEBUG'
-openshift_docker_options: "--log-driver=journald"
-
-#Disable journald persistence
-journald_vars_to_replace:
-  - { var: Storage, val: volatile }
-  - { var: Compress, val: no }
-  - { var: SyncIntervalSec, val: 1s }
-  - { var: RateLimitInterval, val: 1s }
-  - { var: RateLimitBurst, val: 10000 }
-  - { var: SystemMaxUse, val: 8G }
-  - { var: SystemKeepFree, val: 20% }
-  - { var: SystemMaxFileSize, val: 10M }
-  - { var: MaxRetentionSec, val: 1month }
-  - { var: MaxFileSec, val: 1day }
-  - { var: ForwardToSyslog, val: no }
-  - { var: ForwardToWall, val: no }
-
-#Other settings
-openshift_enable_origin_repo: false
-osm_default_node_selector: "node-role.kubernetes.io/compute=true"
-openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
-openshift_logging_es_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-openshift_logging_es_ops_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-osm_controller_args:
-  enable-hostpath-provisioner:
-    - "true"
-openshift_hosted_router_create_certificate: true
-openshift_master_audit_config:
-  enabled: true
-openshift_master_identity_providers:
-  - name: "allow_all"
-    login: "true"
-    challenge: "true"
-    kind: "AllowAllPasswordIdentityProvider"
-openshift_template_service_broker_namespaces:
-  - "openshift"
-enable_excluders: "true"
-osm_cluster_network_cidr: "10.128.0.0/14"
-openshift_portal_net: "172.30.0.0/16"
-osm_host_subnet_length: 9
-openshift_check_min_host_disk_gb: 1.5
-openshift_check_min_host_memory_gb: 1.9
-openshift_disable_check: package_update,package_availability,memory_availability,disk_availability
-
-openshift_logging_use_mux: false
-openshift_logging_use_ops: true
-openshift_logging_es_log_appenders:
-  - "console"
-openshift_logging_fluentd_journal_read_from_head: false
-openshift_logging_fluentd_audit_container_engine: true
-
-openshift_logging_curator_cpu_request: "100m"
-openshift_logging_curator_memory_limit: "32Mi"
-openshift_logging_curator_ops_cpu_request: "100m"
-openshift_logging_curator_ops_memory_limit: "32Mi"
-openshift_logging_elasticsearch_proxy_cpu_request: "100m"
-openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
-openshift_logging_es_cpu_request: "400m"
-openshift_logging_es_memory_limit: "4Gi"
-openshift_logging_es_ops_cpu_request: "400m"
-openshift_logging_es_ops_memory_limit: "4Gi"
-openshift_logging_eventrouter_cpu_request: "100m"
-openshift_logging_eventrouter_memory_limit: "64Mi"
-openshift_logging_fluentd_cpu_request: "100m"
-openshift_logging_fluentd_memory_limit: "256Mi"
-openshift_logging_kibana_cpu_request: "100m"
-openshift_logging_kibana_memory_limit: "128Mi"
-openshift_logging_kibana_ops_cpu_request: "100m"
-openshift_logging_kibana_ops_memory_limit: "128Mi"
-openshift_logging_kibana_ops_proxy_cpu_request: "100m"
-openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
-openshift_logging_kibana_proxy_cpu_request: "100m"
-openshift_logging_kibana_proxy_memory_limit: "64Mi"
-openshift_logging_mux_cpu_request: "400m"
-openshift_logging_mux_memory_limit: "256Mi"
-
-openshift_master_cluster_method: native
-
-openshift_node_port_range: '30000-32000'

+ 0 - 112
test/aws/launch.yml

@@ -1,112 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: list available AMIs
-      ec2_ami_facts:
-        region: "{{ aws_region }}"
-        filters: "{{ aws_ami_tags }}"
-      register: ami_facts
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: Create EC2 instance
-      ec2:
-        region: "{{ aws_region }}"
-        key_name: "{{ aws_key }}"
-        instance_type: "{{ item.aws_flavor }}"
-        image: "{{ item.aws_image | default(aws_image) }}"
-        wait: yes
-        group: "{{ item.aws_security_group }}"
-        count: 1
-        vpc_subnet_id: "{{ aws_subnet }}"
-        assign_public_ip: yes
-        instance_tags: "{{ aws_instance_tags }}"
-        volumes: "{{ item.aws_volumes | default(omit) }}"
-      register: ec2
-      with_items: "{{ aws_instances }}"
-      vars:
-        aws_instance_tags: |
-          {
-            "kubernetes.io/cluster/{{ aws_cluster_id }}": "true",
-            "Name": "{{ item.name }}",
-            "ansible-groups": "{{ item.ansible_groups | join(',') }}",
-            "ansible-node-group": "{{ item.node_group }}",
-            "expirationDate": "{{ item.aws_expiration_date | default(aws_expiration_date) }}"
-          }
-
-    - name: Add machine to inventory
-      add_host:
-        name: "{{ item.instances.0.tags['Name'] }}"
-        ansible_host: "{{ item.instances.0.dns_name }}"
-        ansible_user: "{{ item.instances.0.aws_user | default(aws_user)}}"
-        groups: "{{ item.instances.0.tags['ansible-groups'].split(',') }}"
-        aws_region: "{{ aws_region }}"
-        aws_ip: "{{ item.instances.0.public_ip }}"
-        aws_id: "{{ item.instances.0.id }}"
-        openshift_node_group_name: "{{ item.instances.0.tags['ansible-node-group'] }}"
-      with_items: "{{ ec2.results }}"
-
-    - name: write the inventory
-      template:
-        src: ./template-inventory.j2
-        dest: "inventory/hosts"
-
-    - name: Refresh inventory to ensure new instances exist in inventory
-      meta: refresh_inventory
-
-- hosts: all
-  gather_facts: no
-  become: true
-  tasks:
-    - wait_for_connection: {}
-    - name: Make sure hostname is set to public ansible host
-      hostname:
-        name: "{{ ansible_host }}"
-    - name: Detecting Operating System
-      shell: ls /run/ostree-booted
-      ignore_errors: yes
-      failed_when: false
-      register: ostree_output
-    - name: Update all packages
-      package:
-        name: '*'
-        state: latest
-      when: ostree_output.rc != 0
-      register: yum_update
-    - name: Update Atomic system
-      command: atomic host upgrade
-      when: ostree_output.rc == 0
-      register: ostree_update
-    - name: Reboot machines
-      shell: sleep 5 && systemctl reboot
-      async: 1
-      poll: 0
-      ignore_errors: true
-      when: yum_update | changed or ostree_update | changed
-    - name: Wait for connection
-      wait_for_connection:
-        connect_timeout: 20
-        sleep: 5
-        delay: 5
-        timeout: 300
-    - setup: {}
-
-- import_playbook: ../../playbooks/openshift-node/network_manager.yml
-- import_playbook: ../../playbooks/prerequisites.yml
-- import_playbook: ../../playbooks/deploy_cluster.yml

+ 136 - 0
test/aws/scaleup.yml

@@ -0,0 +1,136 @@
+---
+- name: run the init
+  import_playbook: ../../playbooks/init/main.yml
+  vars:
+    l_init_fact_hosts: "all:!all"
+    l_openshift_version_set_hosts: "all:!all"
+
+- name: create new nodes
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: add localhost as master
+    add_host:
+      name: localhost
+      ansible_connection: local
+      groups: masters
+
+  - import_tasks: get_machinesets.yml
+
+  - include_tasks: create_machineset.yml
+    loop: "{{ machineset.results.results[0]['items'] }}"
+    when:
+    - item.status.replicas is defined
+    - item.status.replicas != 0
+
+- name: wait for nodes to become available
+  hosts: new_workers
+  gather_facts: false
+  tasks:
+  - wait_for_connection: {}
+  - setup: {}
+
+- import_playbook: ../../playbooks/openshift-node/scaleup.yml
+
+- name: wait for nodes to join
+  hosts: new_workers
+  tasks:
+  - name: HACK disable selinux
+    selinux:
+      policy: targeted
+      state: permissive
+  - name: Create core user for storage tests to pass
+    user:
+      name: core
+      group: wheel
+  - name: Make sure core user has ssh config directory
+    file:
+      name: /home/core/.ssh
+      state: directory
+      owner: core
+      group: wheel
+      mode: 0700
+  - name: Copy a list of authorized ssh keys
+    copy:
+      src: /home/ec2-user/.ssh/authorized_keys
+      dest: /home/core/.ssh/authorized_keys
+      remote_src: true
+      owner: core
+      group: wheel
+      mode: 0600
+  - name: Install nfs-utils for storage tests
+    package:
+      name: nfs-utils
+      state: present
+  - name: Wait for new nodes to be ready
+    oc_obj:
+      kubeconfig: "{{ kubeconfig_path }}"
+      state: list
+      kind: node
+      name: "{{ node_name }}"
+    delegate_to: localhost
+    register: new_machine
+    until:
+    - new_machine.results is defined
+    - new_machine.results.returncode is defined
+    - new_machine.results.results is defined
+    - new_machine.results.returncode == 0
+    - new_machine.results.results[0].status is defined
+    - new_machine.results.results[0].status.conditions is defined
+    - new_machine.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+    # Give the node up to 24 minutes (48 retries x 30s) to become Ready.
+    retries: 48
+    delay: 30
+    ignore_errors: true
+  - when: new_machine is failed
+    block:
+    - name: Collect a list of containers
+      command: crictl ps -a -q
+      register: crictl_ps_output
+    - name: Collect container logs
+      command: "crictl logs {{ item }}"
+      register: crictl_logs_output
+      with_items: "{{ crictl_ps_output.stdout_lines }}"
+      ignore_errors: true
+    - debug:
+        var: crictl_logs_output
+    - debug:
+        var: crictl_ps_output
+    - fail:
+        msg: Node failed to become Ready
+
+- name: Remove CoreOS nodes
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Mark CoreOS nodes as unschedulable
+    oc_adm_manage_node:
+      kubeconfig: "{{ kubeconfig_path }}"
+      node: "{{ item }}"
+      schedulable: False
+    with_items: "{{ pre_scaleup_workers_name }}"
+
+  - name: Drain CoreOS nodes
+    command: >
+      oc adm drain {{ item | lower }}
+      --config={{ kubeconfig_path }}
+      --force --delete-local-data --ignore-daemonsets
+      --timeout=0s
+    with_items: "{{ pre_scaleup_workers_name }}"
+
+  - name: remove existing machinesets
+    oc_obj:
+      state: absent
+      kind: machinesets
+      namespace: openshift-cluster-api
+      name: "{{ item }}"
+      kubeconfig: "{{ kubeconfig_path }}"
+    with_items: "{{ pre_scaleup_machineset_names }}"
+
+  - name: Delete CoreOS nodes
+    oc_obj:
+      kubeconfig: "{{ kubeconfig_path }}"
+      state: absent
+      kind: node
+      name: "{{ item }}"
+    with_items: "{{ pre_scaleup_workers_name }}"

+ 0 - 26
test/aws/template-inventory.j2

@@ -1,26 +0,0 @@
-[OSEv3:vars]
-ansible_python_interpreter="{{ python }}"
-ansible_user="{{ aws_user }}"
-aws_region="{{ aws_region }}"
-openshift_master_default_subdomain="{{ hostvars[groups[('lb' in groups) | ternary('lb', 'masters')][0]]["aws_ip"] }}.xip.io"
-
-[OSEv3:children]
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-{{group}}
-{% endif %}
-{% endfor %}
-
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-[{{group}}]
-{% for entry in groups[group] %}
-{% set addon_opts = "" %}
-{% if group == "nodes" %}
-{% set addon_opts = addon_opts + " openshift_node_group_name='" + hostvars[entry]['openshift_node_group_name'] + "'" %}
-{% endif %}
-{{ entry }} ansible_host='{{ hostvars[entry]['ansible_host'] }}' aws_id='{{ hostvars[entry]['aws_id'] }}' {{ addon_opts }}
-{% endfor %}
-{% endif %}
-
-{% endfor %}

+ 0 - 46
test/aws/vars.yml.sample

@@ -1,46 +0,0 @@
----
-vm_prefix: "ci_test"
-#aws_use_auto_terminator is set to True by default, as rh-dev account doesn't have permission
-# to terminate instances. These should be stopped and renamed to include 'terminate' instead
-#aws_use_auto_terminator: false
-
-type: aws
-aws_user: "ec2-user"
-python: "/usr/bin/python"
-
-aws_key: "libra"
-aws_region: "us-east-1"
-aws_cluster_id: "ci"
-# us-east-1d
-aws_subnet: "subnet-cf57c596"
-
-aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
-
-aws_ami_tags:
-  "tag:operating_system": "rhel"
-  "tag:image_stage": "base"
-  "tag:ready": "yes"
-
-aws_instances:
-- name: "{{ vm_prefix }}-master"
-  ansible_groups:
-    - masters
-    - etcd
-    - nodes
-  aws_flavor: t2.large
-  aws_security_group: public
-  node_group: "node-config-all-in-one"
-  # Use custom AMI tags
-  # aws_ami_tags:
-  #   operating_system: "rhel"
-  #   image_stage: "base"
-  #   ready: "yes"
-  # Use custom AMI
-  #aws_image: "ami-70e8fd66"
-  # Attach custom volumes
-  #aws_volumes:
-  # - device_name: /dev/sdb
-  #   volume_size: 50
-  #   delete_on_termination: yes
-  #Set expiration date for instances on CI namespace
-  #aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"

+ 19 - 0
test/gcp/launch.yml

@@ -10,3 +10,22 @@
   - meta: refresh_inventory
 
 - import_playbook: install.yml
+
+# Inventory refresh required to remove deleted bootstrap node
+- hosts: localhost
+  tasks:
+  - meta: refresh_inventory
+
+# Re-scan GCP machines
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: place all scale groups into Ansible groups
+    include_role:
+      name: openshift_gcp
+      tasks_from: setup_scale_group_facts.yml
+
+- name: run worker scaleup
+  import_playbook: ../../playbooks/openshift-node/scaleup.yml
+  vars:
+    openshift_api_host: "{{ openshift_install_config['metadata']['name'] }}-api.{{ openshift_install_config['baseDomain'] }}"