provision.j2.sh

#!/bin/bash
set -euo pipefail
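# NOTE: this file is a Jinja2 template ('.j2.sh'); the templated expressions below are filled in
# with inventory variables when the template is rendered (e.g. by Ansible) before the script runs.
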
if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
    # Create SSH key for GCE
    if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then
        ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N ''
        ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
    fi

    # Check whether the public key is already in the project metadata, and add it if not
    pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
    key_tmp_file='/tmp/ocp-gce-keys'
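    # The project-wide 'sshKeys' metadata holds one '<user>:<public key>' entry per line,
    # for example (illustrative): cloud-user:ssh-rsa AAAA... gce-provision-cloud-user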
    if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
        if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
            gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
        fi
        echo -n 'cloud-user:' >> "$key_tmp_file"
        cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
        gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
        rm -f "$key_tmp_file"
    fi
fi

metadata=""
if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
    if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
        echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
        exit 1
    fi
    metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
fi

if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
    if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
        echo "User data file missing at {{ openshift_gcp_user_data_file }}"
        exit 1
    fi
    if [[ -n "${metadata}" ]]; then
        metadata+=","
    else
        metadata="--metadata-from-file="
    fi
    metadata+="user-data={{ openshift_gcp_user_data_file }}"
fi
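
# At this point ${metadata} is either empty or a single combined flag, e.g. (illustrative):
#   --metadata-from-file=startup-script=/path/to/startup.sh,user-data=/path/to/user-data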

# Select image or image family
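# If no image with the exact name exists, the name is treated as an image family instead;
# 'family/<name>' is intended to resolve to the newest image in that family.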
image="{{ openshift_gcp_image }}"
if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
    if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
        echo "No compute image or image-family found, create an image named '{{ openshift_gcp_image }}' to continue"
        exit 1
    fi
    image="family/${image}"
fi

### PROVISION THE INFRASTRUCTURE ###

dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"

# The DNS managed zone must already exist in Google Cloud DNS; exit if it does not
if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
    echo "DNS zone '${dns_zone}' doesn't exist. It must be configured prior to running this script"
    exit 1
fi

# Create network
if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
else
    echo "Network '{{ openshift_gcp_network_name }}' already exists"
fi

# Firewall rules in the form:
#   ['name']='parameters for "gcloud compute firewall-rules create"'
# For all possible parameters see: gcloud compute firewall-rules create --help
range=""
if [[ -n "{{ openshift_node_port_range }}" ]]; then
    range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
fi
declare -A FW_RULES=(
    ['icmp']='--allow icmp'
    ['ssh-external']='--allow tcp:22'
    ['ssh-internal']='--allow tcp:22 --source-tags bastion'
    ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
    ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
    ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
    ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
    ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
)
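
# Each firewall rule below is created in its own background subshell; the 'wait' loop further
# down blocks until all of these parallel provisioning jobs have finished.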
for rule in "${!FW_RULES[@]}"; do
    ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
    else
        echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
    fi ) &
done

# Master IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
else
    echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
fi ) &

# Internal master IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
    echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
fi ) &

# Router IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
    echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
fi ) &
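
# One instance template and one managed instance group are provisioned per entry in
# openshift_gcp_node_group_config (rendered by the Jinja loop below).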
{% for node_group in openshift_gcp_node_group_config %}
# configure {{ node_group.name }}
(
    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
            --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
            --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ 'ocp-bootstrap,' if (node_group.bootstrap | default(False)) else '' }}{{ node_group.tags }}" \
            --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
            --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
            --image "{{ node_group.image | default('${image}') }}" ${metadata} \
            --metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}"
    else
        echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
    fi

    # Create instance group
    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
            --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
    else
        echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
    fi
) &
{% endfor %}

for i in $(jobs -p); do wait $i; done

# Configure the master external LB rules
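# External master access is a global TCP-proxy load balancer: HTTPS health check on the
# internal console port -> backend service over the master instance group -> target TCP proxy
# -> global forwarding rule listening on the public console port.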
(
    # Master health check
    if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
    else
        echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
    fi

    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
        --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"

    # Master backend service
    if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
        gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
    else
        echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
    fi

    # Master tcp proxy target
    if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
    else
        echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
    fi

    # Master forwarding rule
    if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
        gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
    else
        echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
    fi
) &

# Configure the master internal LB rules
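# Internal master access and the router use classic network load balancing: an HTTP health
# check -> a regional target pool -> a regional forwarding rule on the reserved address.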
(
    # Internal master health check
    if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
    else
        echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
    fi

    # Internal master target pool
    if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
    else
        echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
    fi

    # Internal master forwarding rule
    if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
        gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
    else
        echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
    fi
) &

# Configure the infra node rules
(
    # Router health check
    if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
    else
        echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
    fi

    # Router target pool
    if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
    else
        echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
    fi

    # Router forwarding rule
    if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
        gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
    else
        echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
    fi
) &

for i in $(jobs -p); do wait $i; done

# set the target pools
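# If the infra network instance group is the master group itself (ig-m), both the master and
# router pools are attached to it; otherwise the router pool goes to the dedicated infra group.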
(
    if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
    else
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
    fi
) &

# configure DNS
(
    # Retry DNS changes until they succeed since this may be a shared resource
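    # Record changes are batched into a Cloud DNS transaction file: 'transaction start' creates
    # the file, 'transaction add' appends record additions, and 'transaction execute' commits
    # them; on an HTTP 412 (precondition not met) the whole batch is rebuilt and retried.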
    while true; do
        dns="${TMPDIR:-/tmp}/dns.yaml"
        rm -f "$dns"

        # DNS record for master lb
        if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
            IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
            if [[ ! -f "$dns" ]]; then
                gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" start -z "${dns_zone}"
            fi
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
        else
            echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
        fi

        # DNS record for internal master lb
        if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
            IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
            if [[ ! -f "$dns" ]]; then
                gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" start -z "${dns_zone}"
            fi
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
        else
            echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
        fi

        # DNS record for router lb
        if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
            IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
            if [[ ! -f "$dns" ]]; then
                gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" start -z "${dns_zone}"
            fi
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
        else
            echo "DNS record for '{{ wildcard_zone }}' already exists"
        fi

        # Commit all DNS changes, retrying if preconditions are not met
        if [[ -f "$dns" ]]; then
            if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file="$dns" execute -z "${dns_zone}" 2>&1 )"; then
                if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
                    continue
                fi
                # Surface the captured error and abort. Exit with a fixed non-zero status: '$?'
                # is not usable here because the 'if !' negation resets it to 0.
                echo "${out}" 1>&2
                exit 1
            fi
        fi
        break
    done
) &

# Create bucket for registry
(
    if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
        gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
    else
        echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
    fi
) &

# wait until all node groups are stable
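# Bootstrap node groups are skipped here unless wait_for_stable is set, presumably because
# bootstrapped instances only report stable once they are configured later.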
{% for node_group in openshift_gcp_node_group_config %}
{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %}
# wait for stable {{ node_group.name }}
( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
{% else %}
# not waiting for {{ node_group.name }} due to bootstrapping
{% endif %}
{% endfor %}

for i in $(jobs -p); do wait $i; done