github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/ansible/roles/volume-add/tasks/main.yaml (about)

     1  ---
     2    - name: verify volume name is unique
     3      command: gluster volume list
     4      register: out
     5      failed_when: "'{{ volume_name }}' in out.stdout"
     6      run_once: true
     7  
     8    - name: set initial storage availability fact
     9      set_fact:
    10        storage_node_space: unavailable
    11  
    12    - name: get storage nodes with enough disk space
    13      set_fact:
    14        storage_node_space: available
    15        free_disk_space_bytes: "{{ item.size_available }}"
    16      when: "{{ item.mount == volume_mount and item.size_available > volume_quota_bytes|float }}"
    17      with_items: "{{ ansible_mounts }}"
    18  
    19    # Groups the nodes into "available_storage_nodes" and "unavailable_storage_nodes"
    20    - name: create group of nodes that have enough disk space
    21      group_by:
    22        key: "{{storage_node_space}}_storage_nodes"
    23  
    24    # Due to 1-node -> 1-disk mapping, we need, at least, replica_count * distribution_count storage nodes
    25    - name: verify we have enough storage nodes for the requested volume
    26      fail:
    27        msg: "Not enough nodes with sufficient disk space for the requested volume. Required: {{ volume_replica_count * volume_distribution_count }} Available: {% if groups['available_storage_nodes'] is defined %}{{groups['available_storage_nodes']|length}}{% else %}0{% endif %}."
    28      run_once: true
    29      when: "groups['available_storage_nodes'] is not defined or groups['available_storage_nodes']|length < volume_replica_count * volume_distribution_count"
    30  
  # Create the directory that backs this volume's brick on every storage node.
  # NOTE(review): the three vars are concatenated with no "/" separators --
  # this assumes volume_mount and volume_base_dir carry their own trailing
  # slash (e.g. "/data" + "/" + name); confirm against the role defaults.
  - name: create brick directory
    file:
      path: "{{ volume_mount }}{{ volume_base_dir }}{{ volume_name }}"
      state: directory
      mode: "{{ volume_mode }}"
    36  
    37    - name: create gluster volume
    38      # Hack to sort hosts by free disk space
    39      # sort filter is ascending by default. we want descending to get nodes with most free disk space.
    40      command: >
    41        gluster volume create {{ volume_name }}
    42        {% if volume_replica_count|int > 1 %} replica {{ volume_replica_count|int }} {% endif %}
    43        {% set nodes = [] %}
    44        {% for host in groups['available_storage_nodes'] %}
    45        {% if nodes.insert(0, {'inventory_hostname': host, 'free': hostvars[host]['free_disk_space_bytes']}) %}{% endif %}
    46        {% endfor %}
    47        {% for host in nodes|sort(attribute='free', reverse=True) %}
    48        {% if loop.index0 < volume_replica_count|int * volume_distribution_count|int %}
    49        {{ host.inventory_hostname }}:/data/{{ volume_name }}
    50        {% endif %}
    51        {% endfor %}
    52        force
    53      run_once: true
    54  
    55    - name: enable NFS on the gluster volume
    56      command: gluster volume set {{ volume_name }} nfs.disable off
    57      run_once: true
    58  
    59    - name: set allowed IP address whitelist on gluster volume
    60      command: gluster volume set {{ volume_name }} nfs.rpc-auth-allow {{ volume_allow_ips }}
    61      run_once: true
    62      when: volume_allow_ips is defined
    63  
    64    - name: start gluster volume
    65      command: gluster volume start {{ volume_name }}
    66      run_once: true
    67  
    68    - name: set quota on the gluster volume
    69      command: gluster volume quota {{ volume_name }} enable
    70      run_once: true
    71  
    72    - name: gluster volume quota set
    73      command: gluster volume quota {{ volume_name }} limit-usage / {{ volume_quota_gb }}GB
    74      run_once: true
    75  
    76    - name: enable quota statistics on the gluster volume
    77      command: gluster volume set {{ volume_name }} quota-deem-statfs on
    78      run_once: true