github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/ansible/roles/kube-dns/tasks/main.yaml

---
- name: create /etc/kubernetes/specs directory
  file:
    path: "{{ kubernetes_spec_dir }}"
    state: directory

- name: copy kubernetes-dns.yaml to remote
  template:
    src: kubernetes-dns.yaml
    dest: "{{ kubernetes_spec_dir }}/kubernetes-dns.yaml"

- name: start kubernetes-dns service
  command: kubectl --kubeconfig {{ kubernetes_kubeconfig.kubectl }} apply -f {{ kubernetes_spec_dir }}/kubernetes-dns.yaml
  register: out

- block:
    - name: wait up to 5 minutes until DNS pods are ready
      command: kubectl --kubeconfig {{ kubernetes_kubeconfig.kubectl }} get deployment kube-dns -n kube-system -o jsonpath='{.status.availableReplicas}'
      register: readyReplicas
      until: readyReplicas.stdout|int == dns.options.replicas|int
      retries: 30
      delay: 10
      failed_when: false # We don't want this task itself to fail; the failure is caught with custom messages in the next tasks.

    - name: fail if DNS pod validation command returned an error
      fail:
        msg: |
          Attempted to validate the DNS pods, but got an error: {{ readyReplicas.stderr }}
      when: readyReplicas.stderr != ""

    - name: fail if DNS pod validation command could not determine if the DNS pods are ready
      fail:
        msg: |
          Waited for all DNS pods to be ready, but they took longer than 5 minutes to be in the ready state.
      when: readyReplicas.stdout == ""

    - name: find the DNS pods that failed to start
      # Get the name and status/phase of every kube-dns pod, filter out the ones that
      # are Running, take the first remaining pod, and cut the status/phase from the output.
      raw: >
        kubectl get pods -n kube-system -l k8s-app=kube-dns
        --no-headers -o custom-columns=name:{.metadata.name},status:{.status.phase} | grep -v "Running" | head -n 1 | cut -d " " -f 1
      register: failedDNSPodNames
      when: readyReplicas.stdout|int != dns.options.replicas|int

    - name: fail if DNS pod validation command could not determine the broken pod
      fail:
        msg: |
          Attempted to find the broken DNS pods, but got an empty response.
      when: failedDNSPodNames.stdout is defined and failedDNSPodNames.stdout == ""

    - name: get the logs of the first DNS pod that did not start up in time
      command: kubectl --kubeconfig {{ kubernetes_kubeconfig.kubectl }} logs -c kubedns -n kube-system {{ failedDNSPodNames.stdout_lines[0] }} --tail 15
      register: failedDNSPodLogs
      when: "'stdout_lines' in failedDNSPodNames and failedDNSPodNames.stdout_lines|length > 0"

    - name: fail if DNS pods are not ready
      fail:
        msg: |
          Waited for all DNS pods to be ready, but they took longer than 5 minutes to be in the ready state.

          The pod's latest logs may indicate why it failed to start up:

          {{ failedDNSPodLogs.stdout }}
      when: "'stdout' in failedDNSPodLogs and readyReplicas.stdout|int != dns.options.replicas|int"
  when: run_pod_validation|bool
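
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the role): the tasks above reference
# the variables kubernetes_spec_dir, kubernetes_kubeconfig.kubectl,
# dns.options.replicas, and run_pod_validation. A play that applies this role
# could define them as below. The concrete values are assumptions, except
# /etc/kubernetes/specs, which the first task names; the host group and
# kubeconfig path are hypothetical.
#
# - hosts: master
#   roles:
#     - kube-dns
#   vars:
#     kubernetes_spec_dir: /etc/kubernetes/specs   # where kubernetes-dns.yaml is rendered
#     kubernetes_kubeconfig:
#       kubectl: /etc/kubernetes/kubeconfig        # passed to every kubectl call above
#     dns:
#       options:
#         replicas: 2                              # expected availableReplicas of the kube-dns deployment
#     run_pod_validation: true                     # gates the entire validation block
# ---------------------------------------------------------------------------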