---
- import_playbook: create_machines.yml

- import_playbook: ../../playbooks/scaleup.yml
  vars:
    openshift_kubeconfig_path: "{{ kubeconfig_path }}"

- name: Wait for nodes to join
  hosts: new_workers
  tasks:
  - block:
    - name: Wait for nodes to report ready
      command: >
        oc get node {{ hostvars[item].ansible_nodename | lower }}
        --kubeconfig={{ kubeconfig_path }}
        --output=jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
      loop: "{{ ansible_play_batch }}"
      delegate_to: localhost
      run_once: true
      register: oc_get
      until:
      - oc_get.stdout == "True"
      retries: 36
      delay: 5
      changed_when: false
    rescue:
    - include_tasks: tasks/gather_logs.yml
    - name: DEBUG - Node failed to report ready
      fail:
        msg: "Node failed to report Ready"

- name: Remove CoreOS nodes
  hosts: localhost
  connection: local
  tasks:
  - block:
    - name: Wait for worker configs to roll out
      command: >
        oc wait machineconfigpool/worker
        --kubeconfig={{ kubeconfig_path }}
        --for=condition=Updated
        --timeout=10m
    rescue:
    - name: DEBUG - Get worker machine config pool
      command: >
        oc get machineconfigpool/worker
        --kubeconfig={{ kubeconfig_path }}
        --output=json
    - name: DEBUG - Worker config rollout failed
      fail:
        msg: "Worker config rollout failed"

  - name: Mark pre-scaleup worker nodes as unschedulable
    command: >
      oc adm cordon {{ item | lower }}
      --kubeconfig={{ kubeconfig_path }}
    loop: "{{ pre_scaleup_workers_name }}"

  - name: Drain pre-scaleup worker nodes
    command: >
      oc adm drain {{ item | lower }}
      --kubeconfig={{ kubeconfig_path }}
      --force
      --delete-local-data
      --ignore-daemonsets
      --timeout=0s
    loop: "{{ pre_scaleup_workers_name }}"

  - name: Remove pre-scaleup machinesets
    command: >
      oc delete machinesets {{ item }}
      --kubeconfig={{ kubeconfig_path }}
      --namespace=openshift-machine-api
    loop: "{{ pre_scaleup_machineset_names }}"