provision.j2.sh 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318
  1. #!/bin/bash
  2. set -euo pipefail
  3. # Create SSH key for GCE
  4. if [ ! -f "{{ gce_ssh_private_key }}" ]; then
  5. ssh-keygen -t rsa -f "{{ gce_ssh_private_key }}" -C gce-provision-cloud-user -N ''
  6. ssh-add "{{ gce_ssh_private_key }}" || true
  7. fi
  8. # Check if the ~/.ssh/google_compute_engine.pub key is in the project metadata, and if not, add it there
  9. pub_key=$(cut -d ' ' -f 2 < "{{ gce_ssh_private_key }}.pub")
  10. key_tmp_file='/tmp/ocp-gce-keys'
  11. if ! gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q "$pub_key"; then
  12. if gcloud --project "{{ gce_project_id }}" compute project-info describe | grep -q ssh-rsa; then
  13. gcloud --project "{{ gce_project_id }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
  14. fi
  15. echo -n 'cloud-user:' >> "$key_tmp_file"
  16. cat "{{ gce_ssh_private_key }}.pub" >> "$key_tmp_file"
  17. gcloud --project "{{ gce_project_id }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
  18. rm -f "$key_tmp_file"
  19. fi
  20. metadata=""
  21. if [[ -n "{{ provision_gce_startup_script_file }}" ]]; then
  22. if [[ ! -f "{{ provision_gce_startup_script_file }}" ]]; then
  23. echo "Startup script file missing at {{ provision_gce_startup_script_file }} from=$(pwd)"
  24. exit 1
  25. fi
  26. metadata+="--metadata-from-file=startup-script={{ provision_gce_startup_script_file }}"
  27. fi
  28. if [[ -n "{{ provision_gce_user_data_file }}" ]]; then
  29. if [[ ! -f "{{ provision_gce_user_data_file }}" ]]; then
  30. echo "User data file missing at {{ provision_gce_user_data_file }}"
  31. exit 1
  32. fi
  33. if [[ -n "${metadata}" ]]; then
  34. metadata+=","
  35. else
  36. metadata="--metadata-from-file="
  37. fi
  38. metadata+="user-data={{ provision_gce_user_data_file }}"
  39. fi
  40. # Select image or image family
  41. image="{{ provision_gce_registered_image }}"
  42. if ! gcloud --project "{{ gce_project_id }}" compute images describe "${image}" &>/dev/null; then
  43. if ! gcloud --project "{{ gce_project_id }}" compute images describe-from-family "${image}" &>/dev/null; then
  44. echo "No compute image or image-family found, create an image named '{{ provision_gce_registered_image }}' to continue'"
  45. exit 1
  46. fi
  47. image="family/${image}"
  48. fi
  49. ### PROVISION THE INFRASTRUCTURE ###
  50. dns_zone="{{ dns_managed_zone | default(provision_prefix + 'managed-zone') }}"
  51. # Check the DNS managed zone in Google Cloud DNS, create it if it doesn't exist and exit after printing NS servers
  52. if ! gcloud --project "{{ gce_project_id }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
  53. echo "DNS zone '${dns_zone}' doesn't exist. Must be configured prior to running this script"
  54. exit 1
  55. fi
  56. # Create network
  57. if ! gcloud --project "{{ gce_project_id }}" compute networks describe "{{ gce_network_name }}" &>/dev/null; then
  58. gcloud --project "{{ gce_project_id }}" compute networks create "{{ gce_network_name }}" --mode "auto"
  59. else
  60. echo "Network '{{ gce_network_name }}' already exists"
  61. fi
# Firewall rules in a form:
# ['name']='parameters for "gcloud compute firewall-rules create"'
# For all possible parameters see: gcloud compute firewall-rules create --help
# Optional extra node-port range appended to externally-facing rules.
range=""
if [[ -n "{{ openshift_node_port_range }}" ]]; then
  range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
fi
declare -A FW_RULES=(
  ['icmp']='--allow icmp'
  ['ssh-external']='--allow tcp:22'
  ['ssh-internal']='--allow tcp:22 --source-tags bastion'
  ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
  ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
  ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
  ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
  ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
)
# Each rule is created in its own background subshell; they are reaped by a
# 'for i in jobs -p; wait' loop later in the script.
for rule in "${!FW_RULES[@]}"; do
  ( if ! gcloud --project "{{ gce_project_id }}" compute firewall-rules describe "{{ provision_prefix }}$rule" &>/dev/null; then
    # NOTE: ${FW_RULES[$rule]} is intentionally unquoted so the stored flag
    # string word-splits into separate gcloud arguments.
    gcloud --project "{{ gce_project_id }}" compute firewall-rules create "{{ provision_prefix }}$rule" --network "{{ gce_network_name }}" ${FW_RULES[$rule]}
  else
    echo "Firewall rule '{{ provision_prefix }}${rule}' already exists"
  fi ) &
done
  86. # Master IP
  87. ( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
  88. gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-ssl-lb-ip" --global
  89. else
  90. echo "IP '{{ provision_prefix }}master-ssl-lb-ip' already exists"
  91. fi ) &
  92. # Internal master IP
  93. ( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
  94. gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}"
  95. else
  96. echo "IP '{{ provision_prefix }}master-network-lb-ip' already exists"
  97. fi ) &
  98. # Router IP
  99. ( if ! gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" &>/dev/null; then
  100. gcloud --project "{{ gce_project_id }}" compute addresses create "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}"
  101. else
  102. echo "IP '{{ provision_prefix }}router-network-lb-ip' already exists"
  103. fi ) &
  104. {% for node_group in provision_gce_node_groups %}
  105. # configure {{ node_group.name }}
  106. (
  107. if ! gcloud --project "{{ gce_project_id }}" compute instance-templates describe "{{ provision_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
  108. gcloud --project "{{ gce_project_id }}" compute instance-templates create "{{ provision_prefix }}instance-template-{{ node_group.name }}" \
  109. --machine-type "{{ node_group.machine_type }}" --network "{{ gce_network_name }}" \
  110. --tags "{{ provision_prefix }}ocp,ocp,{{ node_group.tags }}" \
  111. --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
  112. --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
  113. --image "${image}" ${metadata}
  114. else
  115. echo "Instance template '{{ provision_prefix }}instance-template-{{ node_group.name }}' already exists"
  116. fi
  117. # Create instance group
  118. if ! gcloud --project "{{ gce_project_id }}" compute instance-groups managed describe "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" &>/dev/null; then
  119. gcloud --project "{{ gce_project_id }}" compute instance-groups managed create "{{ provision_prefix }}ig-{{ node_group.suffix }}" \
  120. --zone "{{ gce_zone_name }}" --template "{{ provision_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
  121. else
  122. echo "Instance group '{{ provision_prefix }}ig-{{ node_group.suffix }}' already exists"
  123. fi
  124. ) &
  125. {% endfor %}
  126. for i in `jobs -p`; do wait $i; done
# Configure the master external LB rules
# Creates, in dependency order: health check -> named port -> backend
# service -> TCP proxy target -> global forwarding rule. Runs in a
# background subshell; reaped by the 'wait' loop later in the script.
(
  # Master health check — HTTPS probe of /healthz on the internal console port.
  if ! gcloud --project "{{ gce_project_id }}" compute health-checks describe "{{ provision_prefix }}master-ssl-lb-health-check" &>/dev/null; then
    gcloud --project "{{ gce_project_id }}" compute health-checks create https "{{ provision_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
  else
    echo "Health check '{{ provision_prefix }}master-ssl-lb-health-check' already exists"
  fi
  # Advertise the master named port on the master instance group (safe to
  # repeat — set-named-ports overwrites the existing mapping).
  gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-named-ports "{{ provision_prefix }}ig-m" \
    --zone "{{ gce_zone_name }}" --named-ports "{{ provision_prefix }}port-name-master:{{ internal_console_port }}"
  # Master backend service — the backend is only attached when the service is
  # first created, so a rerun does not duplicate the instance-group backend.
  if ! gcloud --project "{{ gce_project_id }}" compute backend-services describe "{{ provision_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
    gcloud --project "{{ gce_project_id }}" compute backend-services create "{{ provision_prefix }}master-ssl-lb-backend" --health-checks "{{ provision_prefix }}master-ssl-lb-health-check" --port-name "{{ provision_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ provision_gce_master_https_timeout | default('2m') }}"
    gcloud --project "{{ gce_project_id }}" compute backend-services add-backend "{{ provision_prefix }}master-ssl-lb-backend" --instance-group "{{ provision_prefix }}ig-m" --global --instance-group-zone "{{ gce_zone_name }}"
  else
    echo "Backend service '{{ provision_prefix }}master-ssl-lb-backend' already exists"
  fi
  # Master tcp proxy target
  if ! gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies describe "{{ provision_prefix }}master-ssl-lb-target" &>/dev/null; then
    gcloud --project "{{ gce_project_id }}" compute target-tcp-proxies create "{{ provision_prefix }}master-ssl-lb-target" --backend-service "{{ provision_prefix }}master-ssl-lb-backend"
  else
    echo "Proxy target '{{ provision_prefix }}master-ssl-lb-target' already exists"
  fi
  # Master forwarding rule — binds the reserved global address to the proxy.
  if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
    IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
    gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ provision_prefix }}master-ssl-lb-target"
  else
    echo "Forwarding rule '{{ provision_prefix }}master-ssl-lb-rule' already exists"
  fi
) &
  158. # Configure the master internal LB rules
  159. (
  160. # Internal master health check
  161. if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}master-network-lb-health-check" &>/dev/null; then
  162. gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
  163. else
  164. echo "Health check '{{ provision_prefix }}master-network-lb-health-check' already exists"
  165. fi
  166. # Internal master target pool
  167. if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}master-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
  168. gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}master-network-lb-pool" --http-health-check "{{ provision_prefix }}master-network-lb-health-check" --region "{{ gce_region_name }}"
  169. else
  170. echo "Target pool '{{ provision_prefix }}master-network-lb-pool' already exists"
  171. fi
  172. # Internal master forwarding rule
  173. if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}master-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
  174. IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
  175. gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}master-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}master-network-lb-pool"
  176. else
  177. echo "Forwarding rule '{{ provision_prefix }}master-network-lb-rule' already exists"
  178. fi
  179. ) &
  180. # Configure the infra node rules
  181. (
  182. # Router health check
  183. if ! gcloud --project "{{ gce_project_id }}" compute http-health-checks describe "{{ provision_prefix }}router-network-lb-health-check" &>/dev/null; then
  184. gcloud --project "{{ gce_project_id }}" compute http-health-checks create "{{ provision_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
  185. else
  186. echo "Health check '{{ provision_prefix }}router-network-lb-health-check' already exists"
  187. fi
  188. # Router target pool
  189. if ! gcloud --project "{{ gce_project_id }}" compute target-pools describe "{{ provision_prefix }}router-network-lb-pool" --region "{{ gce_region_name }}" &>/dev/null; then
  190. gcloud --project "{{ gce_project_id }}" compute target-pools create "{{ provision_prefix }}router-network-lb-pool" --http-health-check "{{ provision_prefix }}router-network-lb-health-check" --region "{{ gce_region_name }}"
  191. else
  192. echo "Target pool '{{ provision_prefix }}router-network-lb-pool' already exists"
  193. fi
  194. # Router forwarding rule
  195. if ! gcloud --project "{{ gce_project_id }}" compute forwarding-rules describe "{{ provision_prefix }}router-network-lb-rule" --region "{{ gce_region_name }}" &>/dev/null; then
  196. IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
  197. gcloud --project "{{ gce_project_id }}" compute forwarding-rules create "{{ provision_prefix }}router-network-lb-rule" --address "$IP" --region "{{ gce_region_name }}" --target-pool "{{ provision_prefix }}router-network-lb-pool"
  198. else
  199. echo "Forwarding rule '{{ provision_prefix }}router-network-lb-rule' already exists"
  200. fi
  201. ) &
  202. for i in `jobs -p`; do wait $i; done
  203. # set the target pools
  204. (
  205. if [[ "ig-m" == "{{ provision_gce_router_network_instance_group }}" ]]; then
  206. gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool,{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
  207. else
  208. gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}ig-m" --target-pools "{{ provision_prefix }}master-network-lb-pool" --zone "{{ gce_zone_name }}"
  209. gcloud --project "{{ gce_project_id }}" compute instance-groups managed set-target-pools "{{ provision_prefix }}{{ provision_gce_router_network_instance_group }}" --target-pools "{{ provision_prefix }}router-network-lb-pool" --zone "{{ gce_zone_name }}"
  210. fi
  211. ) &
  212. # configure DNS
  213. (
  214. # Retry DNS changes until they succeed since this may be a shared resource
  215. while true; do
  216. dns="${TMPDIR:-/tmp}/dns.yaml"
  217. rm -f $dns
  218. # DNS record for master lb
  219. if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
  220. IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-ssl-lb-ip" --global --format='value(address)')
  221. if [[ ! -f $dns ]]; then
  222. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
  223. fi
  224. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
  225. else
  226. echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
  227. fi
  228. # DNS record for internal master lb
  229. if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
  230. IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}master-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
  231. if [[ ! -f $dns ]]; then
  232. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
  233. fi
  234. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
  235. else
  236. echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
  237. fi
  238. # DNS record for router lb
  239. if ! gcloud --project "{{ gce_project_id }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
  240. IP=$(gcloud --project "{{ gce_project_id }}" compute addresses describe "{{ provision_prefix }}router-network-lb-ip" --region "{{ gce_region_name }}" --format='value(address)')
  241. if [[ ! -f $dns ]]; then
  242. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
  243. fi
  244. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
  245. gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
  246. else
  247. echo "DNS record for '{{ wildcard_zone }}' already exists"
  248. fi
  249. # Commit all DNS changes, retrying if preconditions are not met
  250. if [[ -f $dns ]]; then
  251. if ! out="$( gcloud --project "{{ gce_project_id }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
  252. rc=$?
  253. if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
  254. continue
  255. fi
  256. exit $rc
  257. fi
  258. fi
  259. break
  260. done
  261. ) &
  262. # Create bucket for registry
  263. (
  264. if ! gsutil ls -p "{{ gce_project_id }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}" &>/dev/null; then
  265. gsutil mb -p "{{ gce_project_id }}" -l "{{ gce_region_name }}" "gs://{{ openshift_hosted_registry_storage_gcs_bucket }}"
  266. else
  267. echo "Bucket '{{ openshift_hosted_registry_storage_gcs_bucket }}' already exists"
  268. fi
  269. ) &
  270. # wait until all node groups are stable
  271. {% for node_group in provision_gce_node_groups %}
  272. # wait for stable {{ node_group.name }}
  273. ( gcloud --project "{{ gce_project_id }}" compute instance-groups managed wait-until-stable "{{ provision_prefix }}ig-{{ node_group.suffix }}" --zone "{{ gce_zone_name }}" --timeout=300) &
  274. {% endfor %}
  275. for i in `jobs -p`; do wait $i; done