@@ -0,0 +1,150 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: proxy-and-dns
+  namespace: kube-proxy-and-dns
+  annotations:
+    kubernetes.io/description: |
+      This daemonset launches kube-proxy and DNS.
+    image.openshift.io/triggers: |
+      [
+        {"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"proxy-and-dns\")].image"}
+      ]
+spec:
+  selector:
+    matchLabels:
+      app: proxy-and-dns
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: proxy-and-dns
+        component: network
+        type: infra
+        openshift.io/component: network
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: proxy
+      hostNetwork: true
+      hostPID: true
+      containers:
+      # The network container launches the kube-proxy and DNS.
+      # It relies on an up-to-date node-config.yaml being present.
+      - name: proxy-and-dns
+        image: " "
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #!/bin/bash
+          set -euo pipefail
+
+          # if the node config doesn't exist yet, wait until it does
+          retries=0
+          while true; do
+            if [[ ! -f /etc/origin/node/node-config.yaml ]]; then
+              echo "warning: Cannot find existing node-config.yaml, waiting 15s ..." 1>&2
+              sleep 15 & wait
+              (( retries += 1 ))
+            else
+              break
+            fi
+            if [[ "${retries}" -gt 40 ]]; then
+              echo "error: No existing node-config.yaml, exiting" 1>&2
+              exit 1
+            fi
+          done
+
+          if [[ -f /etc/sysconfig/origin-node ]]; then
+            set -o allexport
+            source /etc/sysconfig/origin-node
+          fi
+
+          # use either the bootstrapped node kubeconfig or the static configuration
+          file=/etc/origin/node/node.kubeconfig
+          if [[ ! -f "${file}" ]]; then
+            # use the static node config if it exists
+            # TODO: remove when static node configuration is no longer supported
+            for f in /etc/origin/node/system*.kubeconfig; do
+              echo "info: Using ${f} for node configuration" 1>&2
+              file="${f}"
+              break
+            done
+          fi
+          # Use the same config as the node, but with the service account token
+          oc config "--config=${file}" view --flatten > /tmp/kubeconfig
+          oc config --config=/tmp/kubeconfig set-credentials sa "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"
+          oc config --config=/tmp/kubeconfig set-context "$( oc config --config=/tmp/kubeconfig current-context )" --user=sa
+          # Launch the kube-proxy and DNS process
+          exec openshift start network --disable=plugins --enable=proxy,dns --config=/etc/origin/node/node-config.yaml --kubeconfig=/tmp/kubeconfig --loglevel=${DEBUG_LOGLEVEL:-2}
+
+        securityContext:
+          runAsUser: 0
+          # Permission could be reduced by selecting an appropriate SELinux policy
+          privileged: true
+
+        volumeMounts:
+        # Directory which contains the host configuration.
+        - mountPath: /etc/origin/node/
+          name: host-config
+          readOnly: true
+        - mountPath: /etc/sysconfig/origin-node
+          name: host-sysconfig-node
+          readOnly: true
+        # Mount the entire run directory for iptables lockfile access
+        # TODO: remove
+        - mountPath: /var/run
+          name: host-var-run
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/run/kubernetes/
+          name: host-var-run-kubernetes
+          readOnly: true
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        env:
+        - name: OPENSHIFT_DNS_DOMAIN
+          value: cluster.local
+        ports:
+        - name: healthz
+          containerPort: 10256
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10256
+        #     scheme: HTTP
+        lifecycle:
+
+      volumes:
+      # In bootstrap mode, the host config contains information not easily available
+      # from other locations.
+      - name: host-config
+        hostPath:
+          path: /etc/origin/node
+      - name: host-sysconfig-node
+        hostPath:
+          path: /etc/sysconfig/origin-node
+      - name: host-modules
+        hostPath:
+          path: /lib/modules
+      - name: host-var-run
+        hostPath:
+          path: /var/run
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-run-kubernetes
+        hostPath:
+          path: /var/run/kubernetes
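
For reference, a minimal way to try this manifest out (assuming the kube-proxy-and-dns namespace, the proxy service account, and the node:v3.10 image stream tag referenced above already exist; the file name proxy-and-dns.yaml is only illustrative):

  # apply the DaemonSet, then wait for the RollingUpdate to finish on every node
  oc apply -f proxy-and-dns.yaml
  oc -n kube-proxy-and-dns rollout status daemonset/proxy-and-dns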