storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/docs/deployment/kernel-tuning/sysctl.sh (about)

#!/bin/bash

cat > sysctl.conf <<EOF
# maximum number of open files/file descriptors
fs.file-max = 4194303

# use as little swap space as possible
vm.swappiness = 1

# reclaim the directory and inode (VFS) caches less aggressively than the default of 100
vm.vfs_cache_pressure = 50

# keep a minimum of roughly 1 GB of memory free (value is in KiB)
vm.min_free_kbytes = 1000000

# follow Mellanox best practices https://community.mellanox.com/s/article/linux-sysctl-tuning
# the following changes are recommended by Mellanox for improving IPv4 traffic performance

# disable the TCP timestamps option for better CPU utilization
net.ipv4.tcp_timestamps = 0

# enable the TCP selective acks option for better throughput
net.ipv4.tcp_sack = 1

# increase the maximum length of processor input queues
net.core.netdev_max_backlog = 250000

# increase the TCP maximum and default buffer sizes; the *_max values cap what
# applications can request via setsockopt()
net.core.rmem_max = 4194304
net.core.wmem_max = 4194304
net.core.rmem_default = 4194304
net.core.wmem_default = 4194304
net.core.optmem_max = 4194304

# increase memory thresholds to prevent packet dropping:
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 65536 4194304

# enable low latency mode for TCP (note: this knob was removed in Linux 4.14,
# so it is a no-op on newer kernels):
net.ipv4.tcp_low_latency = 1

# the following variable tells the kernel how much of the socket buffer space
# should be used for the TCP window, and how much to reserve as an application
# buffer. A value of 1 means the socket buffer is divided evenly between the
# TCP window and the application.
net.ipv4.tcp_adv_win_scale = 1
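
# worked example (per the kernel's tcp_adv_win_scale formula): the reserved
# application overhead is bytes/2^tcp_adv_win_scale, so with the 4194304-byte
# maximum above and a scale of 1, roughly 2 MiB is advertised as TCP window
# and roughly 2 MiB is kept back as application buffer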

# maximum queue length of completely established sockets waiting for accept()
net.core.somaxconn = 65535
# maximum number of remembered half-open connection requests (SYN_RECV)
net.ipv4.tcp_max_syn_backlog = 4096

# seconds to keep a socket in FIN-WAIT-2 waiting for the closing FIN
net.ipv4.tcp_fin_timeout = 15

# do not send ICMP redirects
net.ipv4.conf.all.send_redirects = 0

# do not accept ICMP redirects
net.ipv4.conf.all.accept_redirects = 0

# drop source-routed packets (LSR or SSR options)
net.ipv4.conf.all.accept_source_route = 0

# path MTU discovery: probe only after an ICMP black hole is detected
net.ipv4.tcp_mtu_probing = 1

EOF

echo "Enabling system-level tuning params"
sysctl --quiet --load sysctl.conf && rm -f sysctl.conf
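
# Note: values loaded this way last only until reboot. A minimal sketch for
# persisting them instead (run the copy before the rm above; the
# 99-minio-tuning.conf file name is illustrative, not required):
#   sudo cp sysctl.conf /etc/sysctl.d/99-minio-tuning.conf
#   sudo sysctl --system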

# `Transparent Hugepage Support`*: This is a Linux kernel feature intended to improve
# performance by making more efficient use of a processor's memory-mapping hardware.
# But it may cause performance issues for non-optimized applications, see
# https://blogs.oracle.com/linux/performance-issues-with-transparent-huge-pages-thp.
# As most Linux distributions set it to `enabled=always` by default, we recommend
# changing this to `enabled=madvise`. This allows applications optimized for
# transparent hugepages to obtain the performance benefits, while preventing the
# associated problems otherwise. Also, set `transparent_hugepage=madvise` on your
# kernel command line (e.g. in /etc/default/grub) to persistently set this value.

echo "Enabling THP madvise"
echo madvise | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
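
# The madvise setting above is also runtime-only. A minimal sketch of making it
# persistent on GRUB-based distributions (file location and the regeneration
# command vary by distro; verify the resulting GRUB_CMDLINE_LINUX line afterwards):
#   sudo sed -i 's/^GRUB_CMDLINE_LINUX="/GRUB_CMDLINE_LINUX="transparent_hugepage=madvise /' /etc/default/grub
#   sudo update-grub    # Debian/Ubuntu; on RHEL-family use: grub2-mkconfig -o /boot/grub2/grub.cfg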