github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/ansible/roles/kube-dns/templates/kubernetes-dns.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
    prometheus.io/port: "10254"
    prometheus.io/scrape: "true"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ kubernetes_dns_service_ip }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
  annotations:
    kismatic/version: "{{ kismatic_short_version }}"
spec:
  replicas: {{ dns.options.replicas|int }}
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values:
                  - kube-dns
              topologyKey: kubernetes.io/hostname
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: "{{ images.kubedns }}"
        imagePullPolicy: IfNotPresent
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        #- --kube-master-url={{ kubernetes_master_ip }}
        - --v=2
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        # setting these vars to use with serviceaccount
        # --kube-master-url does not work with error:
        # x509: failed to load system roots and no roots provided
        # CA does not get mounted correctly with that flag
        env:
        - name: KUBERNETES_SERVICE_HOST
          value: "{{ kubernetes_load_balanced_fqdn }}"
        - name: KUBERNETES_SERVICE_PORT
          value: "{{ kubernetes_master_secure_port }}"
        - name: PROMETHEUS_PORT
          value: "10055"
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: "{{ images.kube_dnsmasq }}"
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: "{{ images.kubedns_sidecar }}"
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
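
# --- Illustrative only (not part of the rendered manifest) ---
# The {{ ... }} expressions above are Jinja2 variables supplied by kismatic's
# Ansible inventory at render time. A hypothetical set of values is sketched
# below to show the expected shapes; the names come from this template, but
# the values are examples, not kismatic defaults:
#
#   kubernetes_dns_service_ip: 10.32.0.10
#   kismatic_short_version: "v1.10.1"
#   kubernetes_load_balanced_fqdn: "api.cluster.example.com"
#   kubernetes_master_secure_port: "6443"
#   dns:
#     options:
#       replicas: 2
#   images:
#     kubedns: "example.registry/k8s-dns-kube-dns-amd64:<tag>"
#     kube_dnsmasq: "example.registry/k8s-dns-dnsmasq-nanny-amd64:<tag>"
#     kubedns_sidecar: "example.registry/k8s-dns-sidecar-amd64:<tag>"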