calico-policy-controller.yml.j2

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
---
kind: ClusterRole
apiVersion: v1
metadata:
  name: calico
rules:
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
    verbs:
      - list
      - get
      - watch
  - apiGroups: ["extensions"]
    resources:
      - networkpolicies
    verbs:
      - list
      - get
      - watch
---
apiVersion: v1
kind: ClusterRoleBinding
metadata:
  name: calico
roleRef:
  name: calico
subjects:
  - kind: SystemUser
    name: kube-system:calico
  - kind: ServiceAccount
    name: calico
    namespace: kube-system
userNames:
  - system:serviceaccount:kube-system:calico
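# Note: the SystemUser subject and the userNames list are OpenShift
# extensions to ClusterRoleBinding; upstream Kubernetes RBAC bindings accept
# only User, Group, and ServiceAccount subjects.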
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
    scheduler.alpha.kubernetes.io/tolerations: |
      [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
       {"key": "CriticalAddonsOnly", "operator": "Exists"}]
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
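  # "Recreate" tears down the old pod before starting its replacement, so an
  # update never briefly runs two controller instances side by side.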
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      containers:
        - name: calico-policy-controller
          image: quay.io/calico/kube-policy-controller:v0.5.3
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              value: {{ etcd_endpoints }}
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              value: {{ calico_etcd_ca_cert_file }}
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              value: {{ calico_etcd_key_file }}
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              value: {{ calico_etcd_cert_file }}
            # Since we're running in the host namespace and might not have
            # KubeDNS access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
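            # When CONFIGURE_ETC_HOSTS is enabled the controller adds an entry
            # such as "172.30.0.1 kubernetes.default" to its /etc/hosts; the
            # IP shown is only an assumed example of the kubernetes service
            # clusterIP.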
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - name: certs
              mountPath: /etc/origin/calico
      volumes:
        # Mount in the etcd TLS secrets.
        - name: certs
          hostPath:
            path: /etc/origin/calico
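
The {{ ... }} placeholders are Jinja2 variables that Ansible substitutes when
rendering the template. A minimal sketch of a task that could render and apply
this file (the variable values, destination path, and task names are
illustrative assumptions, not the actual openshift-ansible tasks):

- name: Render the Calico policy controller manifest
  template:
    src: calico-policy-controller.yml.j2
    dest: /tmp/calico-policy-controller.yml
  vars:
    etcd_endpoints: "https://calico-etcd.example.com:2379"    # assumed example endpoint
    calico_etcd_ca_cert_file: /etc/origin/calico/etcd-ca.crt  # assumed file names under
    calico_etcd_key_file: /etc/origin/calico/etcd.key         # the hostPath mounted above
    calico_etcd_cert_file: /etc/origin/calico/etcd.crt

- name: Create the policy controller resources
  command: kubectl apply -f /tmp/calico-policy-controller.yml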