# sdn.yaml

kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: sdn
  namespace: openshift-sdn
  annotations:
    kubernetes.io/description: |
      This daemon set launches the OpenShift networking components (kube-proxy, DNS, and openshift-sdn).
      It expects that OVS is running on the node.
    image.openshift.io/triggers: |
      [
        {"from":{"kind":"ImageStreamTag","name":"node:v3.11"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sdn\")].image"}
      ]
spec:
  selector:
    matchLabels:
      app: sdn
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: sdn
        component: network
        type: infra
        openshift.io/component: network
      annotations:
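        # Older alpha annotation marking the pod as cluster-critical so it is exempt from
        # ordinary eviction; superseded by priorityClassName in later Kubernetes releases.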
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # Requires fairly broad permissions - ability to read all services and network functions as well
      # as all pods.
      serviceAccountName: sdn
      hostNetwork: true
      hostPID: true
      containers:
      # The network container launches the openshift-sdn process, the kube-proxy, and the local DNS service.
      # It relies on an up-to-date node-config.yaml being present.
      - name: sdn
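        # Placeholder image: the image.openshift.io/triggers annotation above instructs OpenShift's
        # image trigger controller to substitute the node:v3.11 ImageStreamTag into this field.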
        image: " "
        command:
        - /bin/bash
        - -c
        - |
          #!/bin/bash
          set -euo pipefail
          # if another process is listening on the cni-server socket, wait until it exits
          trap 'kill $(jobs -p); exit 0' TERM
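          # the waits below use 'sleep 15 & wait' (rather than a plain foreground sleep) so the
          # TERM trap above can interrupt the delay and exit promptly during pod shutdown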
          retries=0
          while true; do
            if echo 'test' | socat - UNIX-CONNECT:/var/run/openshift-sdn/cni-server.sock >/dev/null; then
              echo "warning: Another process is currently listening on the CNI socket, waiting 15s ..." 1>&2
              sleep 15 & wait
              (( retries += 1 ))
            else
              break
            fi
            if [[ "${retries}" -gt 40 ]]; then
              echo "error: Another process is currently listening on the CNI socket, exiting" 1>&2
              exit 1
            fi
          done
          # if the node config doesn't exist yet, wait until it does
          retries=0
          while true; do
            if [[ ! -f /etc/origin/node/node-config.yaml ]]; then
              echo "warning: Cannot find existing node-config.yaml, waiting 15s ..." 1>&2
              sleep 15 & wait
              (( retries += 1 ))
            else
              break
            fi
            if [[ "${retries}" -gt 40 ]]; then
              echo "error: No existing node-config.yaml, exiting" 1>&2
              exit 1
            fi
          done
          # Take over network functions on the node
          rm -Rf /etc/cni/net.d/80-openshift-network.conf
          cp -Rf /opt/cni/bin/* /host/opt/cni/bin/
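          # export any environment overrides from the host sysconfig file, e.g. DEBUG_LOGLEVEL,
          # which the exec at the end of this script reads (allexport marks sourced vars for export)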
          if [[ -f /etc/sysconfig/origin-node ]]; then
            set -o allexport
            source /etc/sysconfig/origin-node
          fi
          # use either the bootstrapped node kubeconfig or the static configuration
          file=/etc/origin/node/node.kubeconfig
          if [[ ! -f "${file}" ]]; then
            # use the static node config if it exists
            # TODO: remove when static node configuration is no longer supported
            for f in /etc/origin/node/system*.kubeconfig; do
              echo "info: Using ${f} for node configuration" 1>&2
              file="${f}"
              break
            done
          fi
          # Use the same config as the node, but with the service account token
          oc config "--config=${file}" view --flatten > /tmp/kubeconfig
          oc config --config=/tmp/kubeconfig set-credentials sa "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"
          oc config --config=/tmp/kubeconfig set-context "$( oc config --config=/tmp/kubeconfig current-context )" --user=sa
          # Launch the network process
          exec openshift start network --config=/etc/origin/node/node-config.yaml --kubeconfig=/tmp/kubeconfig --loglevel=${DEBUG_LOGLEVEL:-2}
        securityContext:
          runAsUser: 0
          # Permissions could be reduced by selecting an appropriate SELinux policy
          privileged: true
        volumeMounts:
        # Directory which contains the host configuration.
        - mountPath: /etc/origin/node/
          name: host-config
          readOnly: true
        - mountPath: /etc/sysconfig/origin-node
          name: host-sysconfig-node
          readOnly: true
        # Mount the entire run directory for socket access for Docker or CRI-O
        # TODO: remove
        - mountPath: /var/run
          name: host-var-run
        # Run directories where we need to be able to access sockets
        - mountPath: /var/run/dbus/
          name: host-var-run-dbus
          readOnly: true
        - mountPath: /var/run/openvswitch/
          name: host-var-run-ovs
          readOnly: true
        - mountPath: /var/run/kubernetes/
          name: host-var-run-kubernetes
          readOnly: true
        # We mount our socket here
        - mountPath: /var/run/openshift-sdn
          name: host-var-run-openshift-sdn
        # CNI related mounts which we take over
        - mountPath: /host/opt/cni/bin
          name: host-opt-cni-bin
        - mountPath: /etc/cni/net.d
          name: host-etc-cni-netd
        - mountPath: /var/lib/cni/networks/openshift-sdn
          name: host-var-lib-cni-networks-openshift-sdn
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        env:
        - name: OPENSHIFT_DNS_DOMAIN
          value: cluster.local
        ports:
        - name: healthz
          containerPort: 10256
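        # 10256 is the default healthz port of the kube-proxy that the network process runs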
        # TODO: Temporarily disabled until we determine how to wait for clean default
        # config
        # livenessProbe:
        #   initialDelaySeconds: 10
        #   httpGet:
        #     path: /healthz
        #     port: 10256
        #     scheme: HTTP
        lifecycle:
      volumes:
      # In bootstrap mode, the host config contains information not easily available
      # from other locations.
      - name: host-config
        hostPath:
          path: /etc/origin/node
      - name: host-sysconfig-node
        hostPath:
          path: /etc/sysconfig/origin-node
      - name: host-modules
        hostPath:
          path: /lib/modules
      # TODO: access to the docker socket should be replaced by CRI socket
      - name: host-var-run
        hostPath:
          path: /var/run
      - name: host-var-run-dbus
        hostPath:
          path: /var/run/dbus
      - name: host-var-run-ovs
        hostPath:
          path: /var/run/openvswitch
      - name: host-var-run-kubernetes
        hostPath:
          path: /var/run/kubernetes
      - name: host-var-run-openshift-sdn
        hostPath:
          path: /var/run/openshift-sdn
      - name: host-opt-cni-bin
        hostPath:
          path: /opt/cni/bin
      - name: host-etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
      - name: host-var-lib-cni-networks-openshift-sdn
        hostPath:
          path: /var/lib/cni/networks/openshift-sdn
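# Illustrative usage, assuming cluster-admin credentials: oc apply -f sdn.yaml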