provision.j2.sh

#!/bin/bash

set -euo pipefail

if [[ -n "{{ openshift_gcp_ssh_private_key }}" ]]; then
    # Create SSH key for GCE
    if [ ! -f "{{ openshift_gcp_ssh_private_key }}" ]; then
        ssh-keygen -t rsa -f "{{ openshift_gcp_ssh_private_key }}" -C gce-provision-cloud-user -N ''
        ssh-add "{{ openshift_gcp_ssh_private_key }}" || true
    fi

    # Check if the public half of the key is in the project metadata, and if not, add it there
    pub_key=$(cut -d ' ' -f 2 < "{{ openshift_gcp_ssh_private_key }}.pub")
    key_tmp_file='/tmp/ocp-gce-keys'
    if ! gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q "$pub_key"; then
        if gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep -q ssh-rsa; then
            gcloud --project "{{ openshift_gcp_project }}" compute project-info describe | grep ssh-rsa | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/value: //' > "$key_tmp_file"
        fi
        echo -n 'cloud-user:' >> "$key_tmp_file"
        cat "{{ openshift_gcp_ssh_private_key }}.pub" >> "$key_tmp_file"
        gcloud --project "{{ openshift_gcp_project }}" compute project-info add-metadata --metadata-from-file "sshKeys=${key_tmp_file}"
        rm -f "$key_tmp_file"
    fi
fi
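
# Note: the legacy project-wide 'sshKeys' metadata entry used above holds one
# 'username:ssh-rsa ...' line per key, which is why any existing entries are dumped
# to the temp file first and the cloud-user key is appended before re-uploading it.
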
metadata=""
if [[ -n "{{ openshift_gcp_startup_script_file }}" ]]; then
    if [[ ! -f "{{ openshift_gcp_startup_script_file }}" ]]; then
        echo "Startup script file missing at {{ openshift_gcp_startup_script_file }} from=$(pwd)"
        exit 1
    fi
    metadata+="--metadata-from-file=startup-script={{ openshift_gcp_startup_script_file }}"
fi

if [[ -n "{{ openshift_gcp_user_data_file }}" ]]; then
    if [[ ! -f "{{ openshift_gcp_user_data_file }}" ]]; then
        echo "User data file missing at {{ openshift_gcp_user_data_file }}"
        exit 1
    fi
    if [[ -n "${metadata}" ]]; then
        metadata+=","
    else
        metadata="--metadata-from-file="
    fi
    metadata+="user-data={{ openshift_gcp_user_data_file }}"
fi
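
# At this point ${metadata} is either empty or a single flag of the form
# '--metadata-from-file=startup-script=...,user-data=...'; it is deliberately left
# unquoted where it is passed to 'instance-templates create' below so that an empty
# value expands to no argument at all.
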
# Select image or image family
image="{{ openshift_gcp_image }}"
if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe "${image}" &>/dev/null; then
    if ! gcloud --project "{{ openshift_gcp_project }}" compute images describe-from-family "${image}" &>/dev/null; then
        echo "No compute image or image-family found; create an image named '{{ openshift_gcp_image }}' to continue"
        exit 1
    fi
    image="family/${image}"
fi
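
# 'describe-from-family' resolves the latest image in an image family, so this
# branch accepts either a concrete image name or a family name; the 'family/'
# prefix is how the instance templates below reference the family form.
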
### PROVISION THE INFRASTRUCTURE ###

dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"

# Check that the DNS managed zone exists in Google Cloud DNS; it must be configured before this script runs
if ! gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${dns_zone}" &>/dev/null; then
    echo "DNS zone '${dns_zone}' doesn't exist. It must be configured prior to running this script"
    exit 1
fi

# Create network
if ! gcloud --project "{{ openshift_gcp_project }}" compute networks describe "{{ openshift_gcp_network_name }}" &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute networks create "{{ openshift_gcp_network_name }}" --mode "auto"
else
    echo "Network '{{ openshift_gcp_network_name }}' already exists"
fi

# Firewall rules in the form:
# ['name']='parameters for "gcloud compute firewall-rules create"'
# For all possible parameters see: gcloud compute firewall-rules create --help
range=""
if [[ -n "{{ openshift_node_port_range }}" ]]; then
    range=",tcp:{{ openshift_node_port_range }},udp:{{ openshift_node_port_range }}"
fi
declare -A FW_RULES=(
    ['icmp']='--allow icmp'
    ['ssh-external']='--allow tcp:22'
    ['ssh-internal']='--allow tcp:22 --source-tags bastion'
    ['master-internal']="--allow tcp:2224,tcp:2379,tcp:2380,tcp:4001,udp:4789,udp:5404,udp:5405,tcp:8053,udp:8053,tcp:8444,tcp:10250,tcp:10255,udp:10255,tcp:24224,udp:24224 --source-tags ocp --target-tags ocp-master"
    ['master-external']="--allow tcp:80,tcp:443,tcp:1936,tcp:8080,tcp:8443${range} --target-tags ocp-master"
    ['node-internal']="--allow udp:4789,tcp:10250,tcp:10255,udp:10255 --source-tags ocp --target-tags ocp-node,ocp-infra-node"
    ['infra-node-internal']="--allow tcp:5000 --source-tags ocp --target-tags ocp-infra-node"
    ['infra-node-external']="--allow tcp:80,tcp:443,tcp:1936${range} --target-tags ocp-infra-node"
)
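
# The --source-tags/--target-tags above are expected to line up with the network
# tags applied to the instance templates below ('ocp' plus the per-group
# node_group.tags values such as ocp-master, ocp-node and ocp-infra-node).
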
for rule in "${!FW_RULES[@]}"; do
    ( if ! gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules describe "{{ openshift_gcp_prefix }}$rule" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute firewall-rules create "{{ openshift_gcp_prefix }}$rule" --network "{{ openshift_gcp_network_name }}" ${FW_RULES[$rule]}
    else
        echo "Firewall rule '{{ openshift_gcp_prefix }}${rule}' already exists"
    fi ) &
done

# Master IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global
else
    echo "IP '{{ openshift_gcp_prefix }}master-ssl-lb-ip' already exists"
fi ) &

# Internal master IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
    echo "IP '{{ openshift_gcp_prefix }}master-network-lb-ip' already exists"
fi ) &

# Router IP
( if ! gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" &>/dev/null; then
    gcloud --project "{{ openshift_gcp_project }}" compute addresses create "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}"
else
    echo "IP '{{ openshift_gcp_prefix }}router-network-lb-ip' already exists"
fi ) &
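
# Each of the check-then-create blocks above runs in its own background subshell;
# the 'for i in $(jobs -p); do wait $i; done' loop further down blocks until all
# of them have finished before the load balancer plumbing is configured.
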
{% for node_group in openshift_gcp_node_group_config %}
# Configure {{ node_group.name }}
(
    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
                --machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
                --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \
                --boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
                --scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
                --image "${image}" ${metadata}
    else
        echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
    fi

    # Create instance group
    if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed describe "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed create "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" \
                --zone "{{ openshift_gcp_zone }}" --template "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" --size "{{ node_group.scale }}"
    else
        echo "Instance group '{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}' already exists"
    fi
) &
{% endfor %}
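
# The node group loop above is expanded when Ansible renders this template, so the
# generated script contains one background block per entry in
# openshift_gcp_node_group_config.
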
for i in $(jobs -p); do wait $i; done

# Configure the master external LB rules
(
    # Master health check
    if ! gcloud --project "{{ openshift_gcp_project }}" compute health-checks describe "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute health-checks create https "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port "{{ internal_console_port }}" --request-path "/healthz"
    else
        echo "Health check '{{ openshift_gcp_prefix }}master-ssl-lb-health-check' already exists"
    fi

    gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-named-ports "{{ openshift_gcp_prefix }}ig-m" \
        --zone "{{ openshift_gcp_zone }}" --named-ports "{{ openshift_gcp_prefix }}port-name-master:{{ internal_console_port }}"

    # Master backend service
    if ! gcloud --project "{{ openshift_gcp_project }}" compute backend-services describe "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --global &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute backend-services create "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --health-checks "{{ openshift_gcp_prefix }}master-ssl-lb-health-check" --port-name "{{ openshift_gcp_prefix }}port-name-master" --protocol "TCP" --global --timeout="{{ openshift_gcp_master_lb_timeout }}"
        gcloud --project "{{ openshift_gcp_project }}" compute backend-services add-backend "{{ openshift_gcp_prefix }}master-ssl-lb-backend" --instance-group "{{ openshift_gcp_prefix }}ig-m" --global --instance-group-zone "{{ openshift_gcp_zone }}"
    else
        echo "Backend service '{{ openshift_gcp_prefix }}master-ssl-lb-backend' already exists"
    fi

    # Master tcp proxy target
    if ! gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies describe "{{ openshift_gcp_prefix }}master-ssl-lb-target" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute target-tcp-proxies create "{{ openshift_gcp_prefix }}master-ssl-lb-target" --backend-service "{{ openshift_gcp_prefix }}master-ssl-lb-backend"
    else
        echo "Proxy target '{{ openshift_gcp_prefix }}master-ssl-lb-target' already exists"
    fi

    # Master forwarding rule
    if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --global &>/dev/null; then
        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
        gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-ssl-lb-rule" --address "$IP" --global --ports "{{ console_port }}" --target-tcp-proxy "{{ openshift_gcp_prefix }}master-ssl-lb-target"
    else
        echo "Forwarding rule '{{ openshift_gcp_prefix }}master-ssl-lb-rule' already exists"
    fi
) &
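
# The external master LB assembled above is a global TCP proxy chain:
# health check -> backend service (keyed on the named port) -> target TCP proxy
# -> global forwarding rule bound to the reserved master-ssl-lb-ip address.
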
# Configure the master internal LB rules
(
    # Internal master health check
    if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}master-network-lb-health-check" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}master-network-lb-health-check" --port "8080" --request-path "/healthz"
    else
        echo "Health check '{{ openshift_gcp_prefix }}master-network-lb-health-check' already exists"
    fi

    # Internal master target pool
    if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}master-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}master-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}master-network-lb-health-check" --region "{{ openshift_gcp_region }}"
    else
        echo "Target pool '{{ openshift_gcp_prefix }}master-network-lb-pool' already exists"
    fi

    # Internal master forwarding rule
    if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}master-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
        gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}master-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}master-network-lb-pool"
    else
        echo "Forwarding rule '{{ openshift_gcp_prefix }}master-network-lb-rule' already exists"
    fi
) &

# Configure the infra node rules
(
    # Router health check
    if ! gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks describe "{{ openshift_gcp_prefix }}router-network-lb-health-check" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute http-health-checks create "{{ openshift_gcp_prefix }}router-network-lb-health-check" --port "1936" --request-path "/healthz"
    else
        echo "Health check '{{ openshift_gcp_prefix }}router-network-lb-health-check' already exists"
    fi

    # Router target pool
    if ! gcloud --project "{{ openshift_gcp_project }}" compute target-pools describe "{{ openshift_gcp_prefix }}router-network-lb-pool" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        gcloud --project "{{ openshift_gcp_project }}" compute target-pools create "{{ openshift_gcp_prefix }}router-network-lb-pool" --http-health-check "{{ openshift_gcp_prefix }}router-network-lb-health-check" --region "{{ openshift_gcp_region }}"
    else
        echo "Target pool '{{ openshift_gcp_prefix }}router-network-lb-pool' already exists"
    fi

    # Router forwarding rule
    if ! gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules describe "{{ openshift_gcp_prefix }}router-network-lb-rule" --region "{{ openshift_gcp_region }}" &>/dev/null; then
        IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
        gcloud --project "{{ openshift_gcp_project }}" compute forwarding-rules create "{{ openshift_gcp_prefix }}router-network-lb-rule" --address "$IP" --region "{{ openshift_gcp_region }}" --target-pool "{{ openshift_gcp_prefix }}router-network-lb-pool"
    else
        echo "Forwarding rule '{{ openshift_gcp_prefix }}router-network-lb-rule' already exists"
    fi
) &
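
# Unlike the external master LB, the internal master and router LBs above are
# regional network LBs: an HTTP health check feeding a target pool plus a regional
# forwarding rule; instances are attached to the pools via the set-target-pools
# calls below.
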
for i in $(jobs -p); do wait $i; done

# Set the target pools
(
    if [[ "ig-m" == "{{ openshift_gcp_infra_network_instance_group }}" ]]; then
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool,{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
    else
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}ig-m" --target-pools "{{ openshift_gcp_prefix }}master-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
        gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed set-target-pools "{{ openshift_gcp_prefix }}{{ openshift_gcp_infra_network_instance_group }}" --target-pools "{{ openshift_gcp_prefix }}router-network-lb-pool" --zone "{{ openshift_gcp_zone }}"
    fi
) &

# Configure DNS
(
    # Retry DNS changes until they succeed since this may be a shared resource
    while true; do
        dns="${TMPDIR:-/tmp}/dns.yaml"
        rm -f $dns

        # DNS record for master lb
        if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_public_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_public_hostname }}"; then
            IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-ssl-lb-ip" --global --format='value(address)')
            if [[ ! -f $dns ]]; then
                gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
            fi
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_public_hostname }}." --type A "$IP"
        else
            echo "DNS record for '{{ openshift_master_cluster_public_hostname }}' already exists"
        fi

        # DNS record for internal master lb
        if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ openshift_master_cluster_hostname }}" 2>/dev/null | grep -q "{{ openshift_master_cluster_hostname }}"; then
            IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}master-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
            if [[ ! -f $dns ]]; then
                gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
            fi
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ openshift_master_cluster_hostname }}." --type A "$IP"
        else
            echo "DNS record for '{{ openshift_master_cluster_hostname }}' already exists"
        fi

        # DNS record for router lb
        if ! gcloud --project "{{ openshift_gcp_project }}" dns record-sets list -z "${dns_zone}" --name "{{ wildcard_zone }}" 2>/dev/null | grep -q "{{ wildcard_zone }}"; then
            IP=$(gcloud --project "{{ openshift_gcp_project }}" compute addresses describe "{{ openshift_gcp_prefix }}router-network-lb-ip" --region "{{ openshift_gcp_region }}" --format='value(address)')
            if [[ ! -f $dns ]]; then
                gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"
            fi
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "{{ wildcard_zone }}." --type A "$IP"
            gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns add -z "${dns_zone}" --ttl 3600 --name "*.{{ wildcard_zone }}." --type CNAME "{{ wildcard_zone }}."
        else
            echo "DNS record for '{{ wildcard_zone }}' already exists"
        fi

        # Commit all DNS changes, retrying if preconditions are not met
        if [[ -f $dns ]]; then
            if ! out="$( gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns execute -z "${dns_zone}" 2>&1 )"; then
                if [[ "${out}" == *"HTTPError 412: Precondition not met"* ]]; then
                    continue
                fi
                # $? is always 0 here because of the leading '!', so surface the
                # captured error and fail explicitly instead of exiting with it
                echo "${out}" >&2
                exit 1
            fi
        fi
        break
    done
) &
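
# gcloud dns transactions stage record changes in the local YAML file named by
# --transaction-file and apply them atomically on 'execute'; an HTTP 412 typically
# means the zone changed underneath the staged transaction, so the loop above
# discards the file and rebuilds the transaction from scratch.
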
# Create bucket for registry
(
    if ! gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bucket_name }}" &>/dev/null; then
        gsutil mb -p "{{ openshift_gcp_project }}" -l "{{ openshift_gcp_region }}" "gs://{{ openshift_gcp_registry_bucket_name }}"
    else
        echo "Bucket '{{ openshift_gcp_registry_bucket_name }}' already exists"
    fi
) &

# Wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
# Wait for stable {{ node_group.name }}
( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300 ) &
{% endfor %}

for i in $(jobs -p); do wait $i; done