---
# DaemonSet that runs the OpenShift SDN networking components on every node.
# The single "sdn" container launches openshift-sdn, kube-proxy, and the local
# DNS service; it expects OVS to already be running on the host.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: sdn
  namespace: openshift-sdn
  annotations:
    kubernetes.io/description: |
      This daemon set launches the OpenShift networking components (kube-proxy, DNS, and openshift-sdn).
      It expects that OVS is running on the node.
    # Image trigger: the "sdn" container image tracks the node:v3.10 ImageStreamTag.
    # The placeholder image " " below is filled in by this trigger.
    image.openshift.io/triggers: |
      [
        {"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sdn\")].image"}
      ]
spec:
  selector:
    matchLabels:
      app: sdn
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: sdn
        component: network
        type: infra
        openshift.io/component: network
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # Requires fairly broad permissions - ability to read all services and network functions as well
      # as all pods.
      serviceAccountName: sdn
      hostNetwork: true
      hostPID: true
      containers:
      # The network container launches the openshift-sdn process, the kube-proxy, and the local DNS service.
      # It relies on an up to date node-config.yaml being present.
      - name: sdn
        # Placeholder; replaced at runtime by the image.openshift.io/triggers
        # annotation above.
        image: " "
        command:
        - /bin/bash
        - -c
        - |
          #!/bin/bash
          set -euo pipefail
          # if another process is listening on the cni-server socket, wait until it exits
          trap 'kill $(jobs -p); exit 0' TERM
          retries=0
          while true; do
            if echo 'test' | socat - UNIX-CONNECT:/var/run/openshift-sdn/cni-server.sock >/dev/null; then
              echo "warning: Another process is currently listening on the CNI socket, waiting 15s ..." 2>&1
              sleep 15 & wait
              (( retries += 1 ))
            else
              break
            fi
            if [[ "${retries}" -gt 40 ]]; then
              echo "error: Another process is currently listening on the CNI socket, exiting" 2>&1
              exit 1
            fi
          done
          # if the node config doesn't exist yet, wait until it does
          retries=0
          while true; do
            file=/etc/sysconfig/origin-node
            if [[ -f /etc/sysconfig/atomic-openshift-node ]]; then
              file=/etc/sysconfig/atomic-openshift-node
            elif [[ -f /etc/sysconfig/origin-node ]]; then
              file=/etc/sysconfig/origin-node
            else
              echo "info: Waiting for the node sysconfig file to be created" 2>&1
              sleep 15 & wait
              continue
            fi
            config_file="$(sed -nE 's|^CONFIG_FILE=([^#].+)|\1|p' "${file}" | head -1)"
            if [[ -z "${config_file}" ]]; then
              echo "info: Waiting for CONFIG_FILE to be set" 2>&1
              sleep 15 & wait
              continue
            fi
            if [[ ! -f ${config_file} ]]; then
              echo "warning: Cannot find existing node-config.yaml, waiting 15s ..." 2>&1
              sleep 15 & wait
              (( retries += 1 ))
            else
              break
            fi
            if [[ "${retries}" -gt 40 ]]; then
              echo "error: No existing node-config.yaml, exiting" 2>&1
              exit 1
            fi
          done
          # Take over network functions on the node
          rm -Rf /etc/cni/net.d/*
          rm -Rf /host/opt/cni/bin/*
          cp -Rf /opt/cni/bin/* /host/opt/cni/bin/
          if [[ -f /etc/sysconfig/origin-node ]]; then
            set -o allexport
            source /etc/sysconfig/origin-node
          fi
          # use either the bootstrapped node kubeconfig or the static configuration
          file=/etc/origin/node/node.kubeconfig
          if [[ ! -f "${file}" ]]; then
            # use the static node config if it exists
            # TODO: remove when static node configuration is no longer supported
            for f in /etc/origin/node/system*.kubeconfig; do
              echo "info: Using ${f} for node configuration" 1>&2
              file="${f}"
              break
            done
          fi
          # Use the same config as the node, but with the service account token
          oc config "--config=${file}" view --flatten > /tmp/kubeconfig
          oc config --config=/tmp/kubeconfig set-credentials sa "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"
          oc config --config=/tmp/kubeconfig set-context "$( oc config --config=/tmp/kubeconfig current-context )" --user=sa
          # Launch the network process
          exec openshift start network --config=${config_file} --kubeconfig=/tmp/kubeconfig --loglevel=${DEBUG_LOGLEVEL:-2}
        securityContext:
          runAsUser: 0
          # Permission could be reduced by selecting an appropriate SELinux policy
          privileged: true
        volumeMounts:
        # Directory which contains the host configuration.
        - mountPath: /etc/origin/node/
          name: host-config
          readOnly: true
        - mountPath: /etc/sysconfig/origin-node
          name: host-sysconfig-node
          readOnly: true
        # Mount the entire run directory for socket access for Docker or CRI-o
        # TODO: remove
        - mountPath: /var/run
          name: host-var-run
        # Run directories where we need to be able to access sockets
        - mountPath: /var/run/dbus/
          name: host-var-run-dbus
          readOnly: true
        - mountPath: /var/run/openvswitch/
          name: host-var-run-ovs
          readOnly: true
        - mountPath: /var/run/kubernetes/
          name: host-var-run-kubernetes
          readOnly: true
        # We mount our socket here
        - mountPath: /var/run/openshift-sdn
          name: host-var-run-openshift-sdn
        # CNI related mounts which we take over
        - mountPath: /host/opt/cni/bin
          name: host-opt-cni-bin
        - mountPath: /etc/cni/net.d
          name: host-etc-cni-netd
        - mountPath: /var/lib/cni/networks/openshift-sdn
          name: host-var-lib-cni-networks-openshift-sdn
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        env:
        - name: OPENSHIFT_DNS_DOMAIN
          value: cluster.local
        ports:
        - name: healthz
          containerPort: 10256
        # TODO: Temporarily disabled until we determine how to wait for clean default
        # config
        # livenessProbe:
        #   initialDelaySeconds: 10
        #   httpGet:
        #     path: /healthz
        #     port: 10256
        #     scheme: HTTP
      volumes:
      # In bootstrap mode, the host config contains information not easily available
      # from other locations.
      - name: host-config
        hostPath:
          path: /etc/origin/node
      - name: host-sysconfig-node
        hostPath:
          path: /etc/sysconfig/origin-node
      - name: host-modules
        hostPath:
          path: /lib/modules
      # TODO: access to the docker socket should be replaced by CRI socket
      - name: host-var-run
        hostPath:
          path: /var/run
      - name: host-var-run-dbus
        hostPath:
          path: /var/run/dbus
      - name: host-var-run-ovs
        hostPath:
          path: /var/run/openvswitch
      - name: host-var-run-kubernetes
        hostPath:
          path: /var/run/kubernetes
      - name: host-var-run-openshift-sdn
        hostPath:
          path: /var/run/openshift-sdn
      - name: host-opt-cni-bin
        hostPath:
          path: /opt/cni/bin
      - name: host-etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
      - name: host-var-lib-cni-networks-openshift-sdn
        hostPath:
          path: /var/lib/cni/networks/openshift-sdn