瀏覽代碼

kube_proxy_and_dns: add role that runs standalone kube-proxy + DNS

And depend on that role in Calico.
Dan Williams 6 年之前
父節點
當前提交
c7ce6f843d

+ 1 - 0
roles/calico_master/meta/main.yml

@@ -15,3 +15,4 @@ galaxy_info:
 dependencies:
 - role: lib_utils
 - role: openshift_facts
+- role: kube_proxy_and_dns  # Calico replaces the SDN, so it needs the standalone kube-proxy + DNS daemonset

+ 10 - 0
roles/kube_proxy_and_dns/files/kube-proxy-and-dns-images.yaml

@@ -0,0 +1,10 @@
+apiVersion: image.openshift.io/v1
+kind: ImageStreamTag
+metadata:
+  name: node:v3.10  # <stream>:<tag> — matched by the daemonset's image.openshift.io/triggers annotation
+  namespace: kube-proxy-and-dns
+tag:
+  reference: true  # reference tag — resolves to the image without importing it
+  from:
+    kind: DockerImage
+    name: openshift/node:v3.10.0  # placeholder; the role's tasks overwrite this with osn_image via yedit

+ 28 - 0
roles/kube_proxy_and_dns/files/kube-proxy-and-dns-policy.yaml

@@ -0,0 +1,28 @@
+kind: List
+apiVersion: v1
+items:
+# Service account the proxy-and-dns daemonset pods run as (spec.serviceAccountName: proxy).
+- kind: ServiceAccount
+  apiVersion: v1
+  metadata:
+    name: proxy
+    namespace: kube-proxy-and-dns
+# Bind the proxy service account to the cluster-reader cluster role.
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: proxy-cluster-reader
+  roleRef:
+    name: cluster-reader  # legacy authorization.openshift.io roleRef carries only a name
+  subjects:
+  - kind: ServiceAccount
+    name: proxy
+    namespace: kube-proxy-and-dns
+# Bind the same service account to system:proxy-reader as well.
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: proxy-reader
+  roleRef:
+    name: system:proxy-reader
+  subjects:
+  - kind: ServiceAccount
+    name: proxy
+    namespace: kube-proxy-and-dns

+ 150 - 0
roles/kube_proxy_and_dns/files/kube-proxy-and-dns.yaml

@@ -0,0 +1,150 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: proxy-and-dns
+  namespace: kube-proxy-and-dns
+  annotations:
+    kubernetes.io/description: |
+      This daemonset launches kube-proxy and DNS.
+    image.openshift.io/triggers: |
+      [
+        {"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"proxy-and-dns\")].image"}
+      ]
+spec:
+  selector:
+    matchLabels:
+      app: proxy-and-dns
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: proxy-and-dns
+        component: network
+        type: infra
+        openshift.io/component: network
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: proxy
+      hostNetwork: true
+      hostPID: true
+      containers:
+      # The network container launches the kube-proxy and DNS.
+      # It relies on an up to date node-config.yaml being present.
+      - name: proxy-and-dns
+        image: " "  # placeholder; filled in by the image trigger annotation above
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #!/bin/bash
+          set -euo pipefail
+
+          # if the node config doesn't exist yet, wait until it does
+          retries=0
+          while true; do
+            if [[ ! -f /etc/origin/node/node-config.yaml ]]; then
+              echo "warning: Cannot find existing node-config.yaml, waiting 15s ..." 1>&2
+              sleep 15 & wait
+              (( retries += 1 ))
+            else
+              break
+            fi
+            if [[ "${retries}" -gt 40 ]]; then
+              echo "error: No existing node-config.yaml, exiting" 1>&2
+              exit 1
+            fi
+          done
+
+          if [[ -f /etc/sysconfig/origin-node ]]; then
+            set -o allexport
+            source /etc/sysconfig/origin-node
+          fi
+
+          # use either the bootstrapped node kubeconfig or the static configuration
+          file=/etc/origin/node/node.kubeconfig
+          if [[ ! -f "${file}" ]]; then
+            # use the static node config if it exists
+            # TODO: remove when static node configuration is no longer supported
+            for f in /etc/origin/node/system*.kubeconfig; do
+              echo "info: Using ${f} for node configuration" 1>&2
+              file="${f}"
+              break
+            done
+          fi
+          # Use the same config as the node, but with the service account token
+          oc config "--config=${file}" view --flatten > /tmp/kubeconfig
+          oc config --config=/tmp/kubeconfig set-credentials sa "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"
+          oc config --config=/tmp/kubeconfig set-context "$( oc config --config=/tmp/kubeconfig current-context )" --user=sa
+          # Launch the kube-proxy and DNS process
+          exec openshift start network --disable=plugins --enable=proxy,dns --config=/etc/origin/node/node-config.yaml --kubeconfig=/tmp/kubeconfig --loglevel=${DEBUG_LOGLEVEL:-2}
+
+        securityContext:
+          runAsUser: 0
+          # Permission could be reduced by selecting an appropriate SELinux policy
+          privileged: true
+
+        volumeMounts:
+        # Directory which contains the host configuration.
+        - mountPath: /etc/origin/node/
+          name: host-config
+          readOnly: true
+        - mountPath: /etc/sysconfig/origin-node
+          name: host-sysconfig-node
+          readOnly: true
+        # Mount the entire run directory for iptables lockfile access
+        # TODO: remove
+        - mountPath: /var/run
+          name: host-var-run
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/run/kubernetes/
+          name: host-var-run-kubernetes
+          readOnly: true
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        env:
+        - name: OPENSHIFT_DNS_DOMAIN
+          value: cluster.local
+        ports:
+        - name: healthz
+          containerPort: 10256
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10256
+        #     scheme: HTTP
+        # NOTE(review): dropped dangling 'lifecycle:' key — a bare key parses as null
+
+      volumes:
+      # In bootstrap mode, the host config contains information not easily available
+      # from other locations.
+      - name: host-config
+        hostPath:
+          path: /etc/origin/node
+      - name: host-sysconfig-node
+        hostPath:
+          path: /etc/sysconfig/origin-node
+      - name: host-modules  # NOTE(review): defined but not mounted by any container above — confirm or remove
+        hostPath:
+          path: /lib/modules
+      - name: host-var-run
+        hostPath:
+          path: /var/run
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-run-kubernetes
+        hostPath:
+          path: /var/run/kubernetes

+ 19 - 0
roles/kube_proxy_and_dns/meta/main.yaml

@@ -0,0 +1,19 @@
+---
+galaxy_info:
+  author: OpenShift Development <dev@lists.openshift.redhat.com>
+  description: Deploy kube-proxy and DNS
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.4
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  - name: Fedora
+    versions:
+    - all
+  categories:
+  - openshift
+dependencies:
+- role: lib_openshift  # provides the oc_project / oc_adm_policy_user modules used in tasks
+- role: openshift_facts  # NOTE(review): tasks also use yedit — confirm lib_utils is available transitively

+ 48 - 0
roles/kube_proxy_and_dns/tasks/main.yml

@@ -0,0 +1,48 @@
+---
+- name: Ensure project exists
+  oc_project:
+    name: kube-proxy-and-dns
+    state: present
+    node_selector:
+      - ""  # empty selector so daemonset pods can land on every node
+
+- name: Make temp directory for templates
+  command: mktemp -d /tmp/ansible-XXXXXX
+  register: mktemp  # mktemp.stdout holds the temp directory path used below
+  changed_when: False
+
+- name: Copy templates to temp directory
+  copy:
+    src: "{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item | basename }}"
+  with_fileglob:
+    - "files/*.yaml"
+
+- name: Update the image tag
+  yedit:
+    src: "{{ mktemp.stdout }}/kube-proxy-and-dns-images.yaml"
+    key: 'tag.from.name'
+    value: "{{ osn_image }}"  # osn_image supplied externally (inventory/defaults) — not defined in this role
+
+- name: Ensure the service account can run privileged
+  oc_adm_policy_user:
+    namespace: "kube-proxy-and-dns"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+    user: "system:serviceaccount:kube-proxy-and-dns:proxy"
+
+# TODO: temporary until we fix apply for image stream tags
+- name: Remove the image stream tag  # delete-then-apply workaround; --ignore-not-found makes it safe on first run
+  shell: >
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig delete -n kube-proxy-and-dns istag node:v3.10 --ignore-not-found
+
+- name: Apply the config  # applies every file copied into the temp directory
+  shell: >
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig apply -f "{{ mktemp.stdout }}"
+
+- name: Remove temp directory
+  file:
+    state: absent
+    name: "{{ mktemp.stdout }}"
+  changed_when: False