github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/ansible/roles/preflight/tasks/main.yaml

---
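# ansible_nodename is the hostname the node reports about itself (equivalent
# to uname -n); it must match either the full inventory name or its short form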
  - name: verify hostname
    fail: msg="provided hostname does not match reported hostname of {{ ansible_nodename }}"
    failed_when: "ansible_nodename not in [ inventory_hostname, inventory_hostname_short ]"

  - name: verify systemd
    fail: msg="systemd is required"
    failed_when: ansible_service_mgr != "systemd"

# kubernetes treats swap as enabled when /proc/swaps has more than one line
# (a header row plus at least one active swap device)
# skip the check when the host carries only the etcd role
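# a typical /proc/swaps with one active swap device looks like:
#   Filename    Type        Size     Used  Priority
#   /dev/dm-1   partition   2097148  0     -2
# the check can be opted out of in the plan file, e.g. (illustrative shape,
# derived from the conditional below):
#   kubelet_overrides:
#     fail-swap-on: 'false'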
  - name: list memory swaps in /proc/swaps
    command: cat /proc/swaps
    register: memory_swaps
    failed_when: false
    when: >
      not ((kubelet_overrides is defined and
      kubelet_overrides['fail-swap-on'] is defined and
      kubelet_overrides['fail-swap-on'] == 'false') or
      (kubelet_node_overrides[inventory_hostname] is defined and
      kubelet_node_overrides[inventory_hostname]['fail-swap-on'] is defined and
      kubelet_node_overrides[inventory_hostname]['fail-swap-on'] == 'false')) and
      ('etcd' not in group_names or
      ('etcd' in group_names and (group_names | length > 1)))

  - name: fail if memory swap is not disabled
    fail:
      msg: "Memory swap is enabled on the node, disable it or set '--fail-swap-on=false' on the kubelet"
    when: >
      memory_swaps is defined and
      memory_swaps.rc is defined and
      (memory_swaps.rc != 0 or
      (memory_swaps.stdout_lines is defined and memory_swaps.stdout_lines|length > 1)) and
      ('etcd' not in group_names or
      ('etcd' in group_names and (group_names | length > 1)))

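# the docker storage settings referenced below are assumed to come from the
# plan file, shaped roughly like this (the path value is illustrative):
#   docker:
#     storage:
#       driver: devicemapper
#       direct_lvm_block_device:
#         path: /dev/sdb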
  - name: validate devicemapper direct-lvm block device
    include: direct_lvm_preflight.yaml
    when: >
      ansible_os_family == 'RedHat' and
      docker.storage.driver == 'devicemapper' and
      docker.storage.direct_lvm_block_device.path != ''

# Every etcd node should be able to reach all etcd nodes. This is quadratic,
# but we can live with it because etcd count is usually <= 5
  - name: verify etcd to etcd node connectivity using IP
    command: ping -c 2 {{ item }}
    # Using map here to get the right item shown in stdout
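    # map('extract', hostvars, 'internal_ipv4') looks up hostvars[host].internal_ipv4
    # for every etcd host, e.g. ['etcd1', 'etcd2'] -> ['10.0.0.10', '10.0.0.11']
    # (addresses illustrative; internal_ipv4 is expected to be set for each host)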
    with_items: "{{ groups['etcd']|map('extract', hostvars, 'internal_ipv4')|list }}"
    when: "'etcd' in group_names"
  - name: verify etcd to etcd node connectivity using hostname
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['etcd'] }}"
    when: "'etcd' in group_names"

# Every master node should be able to reach all etcd nodes
  - name: verify master node to etcd node connectivity using IP
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['etcd']|map('extract', hostvars, 'internal_ipv4')|list }}"
    when: "'master' in group_names"
  - name: verify master node to etcd node connectivity using hostname
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['etcd'] }}"
    when: "'master' in group_names"

# Every worker node should be able to reach all master nodes
  - name: verify worker node to master node connectivity using IP
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['master']|map('extract', hostvars, 'internal_ipv4')|list }}"
    when: "'worker' in group_names"
  - name: verify worker node to master node connectivity using hostname
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['master'] }}"
    when: "'worker' in group_names"

# Every ingress node should be able to reach all master nodes
  - name: verify ingress node to master node connectivity using IP
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['master']|map('extract', hostvars, 'internal_ipv4')|list }}"
    when: "'ingress' in group_names"
  - name: verify ingress node to master node connectivity using hostname
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['master'] }}"
    when: "'ingress' in group_names"

# Every ingress node should be able to reach all worker nodes
  - name: verify ingress node to worker node connectivity using IP
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['worker']|map('extract', hostvars, 'internal_ipv4')|list }}"
    when: "'ingress' in group_names"
  - name: verify ingress node to worker node connectivity using hostname
    command: ping -c 2 {{ item }}
    with_items: "{{ groups['worker'] }}"
    when: "'ingress' in group_names"

# Every worker node should be able to reach all worker nodes.
# We use a random sampling of worker nodes to avoid quadratic complexity.
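# random_ping.yaml is not shown here; it is assumed to ping one randomly
# chosen worker per iteration, along the lines of:
#   - name: ping a random worker node
#     command: ping -c 2 {{ groups['worker'] | random }}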
  - name: verify worker to worker node connectivity with random sample
    include: random_ping.yaml
    with_items: # Ping three nodes at random
      - 1
      - 2
      - 3
    loop_control:
      loop_var: outer_item # Define this (even though we don't use it) so that Ansible doesn't complain.
    when: "'worker' in group_names"

# Run from the install node:
# check that the helm chart repo can be reached
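# note: the uri module fails the task unless the response status is 200 (its
# default accepted status), so an unreachable or erroring repo fails preflight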
  - name: verify install node can reach official helm chart repo
    uri:
      url: https://kubernetes-charts.storage.googleapis.com
    delegate_to: 127.0.0.1
    become: no
    run_once: true
    when: helm.enabled|bool and not disconnected_installation|bool

# setup Kismatic Inspector
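# The inspector binary is copied to every node and started as a systemd
# service, so the preflight checks below can be run against each node over
# the network.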
  - name: copy Kismatic Inspector to node
    copy:
      src: "{{ kismatic_preflight_checker }}"
      dest: "{{ bin_dir }}/kismatic-inspector"
      mode: 0744

  - name: copy kismatic-inspector.service to remote
    template:
      src: kismatic-inspector.service.j2
      dest: "{{ init_system_dir }}/kismatic-inspector.service"
    notify:
      - reload services

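# kismatic-inspector.service.j2 is not shown here; the unit is assumed to run
# the inspector in a server mode listening on port 8888, the address that the
# client invocations below dial ({{ internal_ipv4 }}:8888)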
  - meta: flush_handlers # run pending handlers now so systemd reloads the new unit before we start it

  - name: start kismatic-inspector service
    service:
      name: kismatic-inspector.service
      state: restarted # always restart to ensure that any existing inspectors are replaced by this one

# Run the pre-flight checks, and always stop the checker regardless of the result
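# a rendered client command looks roughly like this (binary path, address, and
# versions are illustrative):
#   /usr/bin/kismatic-inspector client 10.0.0.5:8888 -o json --node-roles master,etcd \
#     --additional-vars kubernetes_yum_version=...,kubernetes_deb_version=...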
  - block:
      - name: run pre-flight checks using Kismatic Inspector from the master
        command: '{{ bin_dir }}/kismatic-inspector client {{ internal_ipv4 }}:8888 -o json --node-roles {{ group_names | join(",") }} {% if upgrading|default("false")|bool %}--upgrade{% endif %} --additional-vars kubernetes_yum_version={{ kubernetes_yum_version }},kubernetes_deb_version={{ kubernetes_deb_version }}'
        delegate_to: "{{ groups['master'][0] }}"
        register: out
      - name: run pre-flight checks using Kismatic Inspector from the worker
        command: '{{ bin_dir }}/kismatic-inspector client {{ internal_ipv4 }}:8888 -o json --node-roles {{ group_names | join(",") }} {% if upgrading|default("false")|bool %}--upgrade{% endif %} --additional-vars kubernetes_yum_version={{ kubernetes_yum_version }},kubernetes_deb_version={{ kubernetes_deb_version }}'
        delegate_to: "{{ groups['worker'][0] }}"
        register: out
    always:
      - name: stop kismatic-inspector service
        service:
          name: kismatic-inspector.service
          state: stopped
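      # /bin/true always exits 0; this task exists solely to re-assert the
      # registered result of the checks (out holds the most recent of the two
      # client runs) now that the inspector has been stopped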
      - name: verify Kismatic Inspector succeeded
        command: /bin/true
        failed_when: "out.rc != 0"