remove.j2.sh

#!/bin/bash
set -euo pipefail
# Delete a gcloud resource if it exists. The first argument is the resource
# name; the words before the first "--" flag form the gcloud command path,
# and that flag plus everything after it is passed to both describe and delete.
function teardown_cmd() {
    local a=( "$@" )
    local name=$1
    a=( "${a[@]:1}" )
    # Find the index of the first "--" flag, which separates the command
    # path from the trailing flags
    local flag=0
    local found=
    for i in "${a[@]}"; do
        if [[ "$i" == "--"* ]]; then
            found=true
            break
        fi
        flag=$((flag+1))
    done
    if [[ -z "${found}" ]]; then
        flag=$((flag+1))
    fi
    if gcloud --project "{{ gce_project_id }}" "${a[@]::$flag}" describe "${name}" "${a[@]:$flag}" &>/dev/null; then
        gcloud --project "{{ gce_project_id }}" "${a[@]::$flag}" delete -q "${name}" "${a[@]:$flag}"
    fi
}
# Retry the teardown of a resource, since a delete can fail while dependent
# resources are still being cleaned up
function teardown() {
    for i in $(seq 1 20); do
        if teardown_cmd "$@"; then
            break
        fi
        sleep 0.5
    done
}
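# Example (hypothetical resource name and region): the call
#   teardown "my-lb-rule" compute forwarding-rules --region us-central1
# probes with
#   gcloud --project ... compute forwarding-rules describe my-lb-rule --region us-central1
# and, if the resource exists, removes it with
#   gcloud --project ... compute forwarding-rules delete -q my-lb-rule --region us-central1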
# Preemptively spin down the instances
{% for node_group in provision_gce_node_groups %}
# scale down {{ node_group.name }}
(
    # Performs a delete and scale down as one operation to ensure maximum parallelism
    if ! instances=$( gcloud --project "{{ gce_project_id }}" compute instance-groups managed list-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --format='value[terminator=","](instance)' ); then
        exit 0
    fi
    # The terminator format yields "name1,name2,"; strip the trailing comma
    instances="${instances%?}"
    if [[ -z "${instances}" ]]; then
        echo "warning: No instances in {{ node_group.name }}" 1>&2
        exit 0
    fi
    if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed delete-instances "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --instances "${instances}"; then
        echo "warning: Unable to scale down the node group {{ node_group.name }}" 1>&2
        exit 0
    fi
) &
{% endfor %}
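# Each node group above is handled in its own background subshell, and failures
# are deliberately non-fatal (exit 0) so the rest of the teardown can proceed.
# A rendered delete call would look roughly like this (hypothetical values):
#   gcloud --project my-project compute instance-groups managed delete-instances \
#       myprefix-ig-node --zone us-central1-a --instances node-1,node-2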
# Bucket for registry
(
    if gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
        gsutil -m rm -r "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
    fi
) &
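# Note: "gsutil -m rm -r" above runs the object deletions in parallel, which
# speeds up emptying a large registry bucket before the bucket itself is removed.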
# DNS
(
    dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
    if gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
        # Retry DNS changes until they succeed since this may be a shared resource
        while true; do
            dns="${TMPDIR:-/tmp}/dns.yaml"
            rm -f "${dns}"
            # Export all matching DNS records in zone-file format, then turn each
            # line into a set of args for record-sets transaction.
            gcloud dns record-sets export --project "{{ gce_project_id }}" -z "${dns_zone}" --zone-file-format "${dns}"
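            # As an illustration (hypothetical record), an exported zone-file line like
            #   foo.example.com. 300 IN A 203.0.113.10
            # becomes the transaction arguments
            #   --name foo.example.com. --ttl 300 --type A 203.0.113.10
            # Note that only the first rdata field ($5) survives the awk expression.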
            if grep -F -e '{{ openshift_master_cluster_hostname }}' -e '{{ openshift_master_cluster_public_hostname }}' -e '{{ wildcard_zone }}' "${dns}" | \
                    awk '{ print "--name", $1, "--ttl", $2, "--type", $4, $5; }' > "${dns}.input"
            then
                rm -f "${dns}"
                gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" start -z "${dns_zone}"
                cat "${dns}.input" | xargs -L1 gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" remove -z "${dns_zone}"
                # Commit all DNS changes, retrying if preconditions are not met
                if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file="${dns}" execute -z "${dns_zone}" 2>&1 )"; then
                    if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
                        continue
                    fi
                    echo "${out}" 1>&2
                    exit 1
                fi
            fi
            rm "${dns}.input"
            break
        done
    fi
) &
(
    # Router network rules
    teardown "{{ provision_prefix }}router-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
    teardown "{{ provision_prefix }}router-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
    teardown "{{ provision_prefix }}router-network-lb-health-check" compute http-health-checks
    teardown "{{ provision_prefix }}router-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
    # Internal master network rules
    teardown "{{ provision_prefix }}master-network-lb-rule" compute forwarding-rules --region "{{ gce_region_name }}"
    teardown "{{ provision_prefix }}master-network-lb-pool" compute target-pools --region "{{ gce_region_name }}"
    teardown "{{ provision_prefix }}master-network-lb-health-check" compute http-health-checks
    teardown "{{ provision_prefix }}master-network-lb-ip" compute addresses --region "{{ gce_region_name }}"
) &
(
    # Master SSL network rules
    teardown "{{ provision_prefix }}master-ssl-lb-rule" compute forwarding-rules --global
    teardown "{{ provision_prefix }}master-ssl-lb-target" compute target-tcp-proxies
    teardown "{{ provision_prefix }}master-ssl-lb-ip" compute addresses --global
    teardown "{{ provision_prefix }}master-ssl-lb-backend" compute backend-services --global
    teardown "{{ provision_prefix }}master-ssl-lb-health-check" compute health-checks
) &
# Firewall rules
# ['name']='parameters for "gcloud compute firewall-rules create"'
# For all possible parameters see: gcloud compute firewall-rules create --help
declare -A FW_RULES=(
    ['icmp']=""
    ['ssh-external']=""
    ['ssh-internal']=""
    ['master-internal']=""
    ['master-external']=""
    ['node-internal']=""
    ['infra-node-internal']=""
    ['infra-node-external']=""
)
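# During teardown only the keys matter; the values (creation parameters, per
# the comment above) are left empty and unused here. The loop below derives
# each rule name by prefixing the key, e.g. "{{ provision_prefix }}ssh-external".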
for rule in "${!FW_RULES[@]}"; do
    (
        if gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
            # Retry a few times because this call can be flaky
            for i in $(seq 1 3); do
                if gcloud -q --project "{{ gce_project_id }}" compute firewall-rules delete "{{ provision_prefix }}$rule"; then
                    break
                fi
            done
        fi
    ) &
done
# Wait for all background teardown jobs before removing the resources they depend on
for i in $(jobs -p); do wait "$i"; done
{% for node_group in provision_gce_node_groups %}
# teardown {{ node_group.name }} - any load balancers referencing these groups must be removed first
(
    teardown "{{ provision_prefix }}ig-{{ node_group.suffix }}" compute instance-groups managed --zone "{{ gce_zone_name }}"
    teardown "{{ provision_prefix }}instance-template-{{ node_group.name }}" compute instance-templates
) &
{% endfor %}
# Wait for the instance group teardown to finish before removing the network
for i in $(jobs -p); do wait "$i"; done
# Network
teardown "{{ gce_network_name }}" compute networks