github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/test/benchmarks/tcp/tcp_benchmark.sh

#!/bin/bash

# Copyright 2018 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TCP benchmark; see README.md for documentation.

# Fixed parameters.
iperf_port=45201    # Not likely to be privileged.
proxy_port=44000    # Ditto.
client_addr=10.0.0.1
client_proxy_addr=10.0.0.2
server_proxy_addr=10.0.0.3
server_addr=10.0.0.4
mask=8

# Defaults; this provides a reasonable approximation of a decent internet link.
# Parameters can be varied independently from this set to see response to
# various changes in the kind of link available.
client=false
server=false
verbose=false
gso=0
swgso=false
mtu=1280            # 1280 is a reasonable lowest-common-denominator.
latency=10          # 10ms approximates a fast, dedicated connection.
latency_variation=1 # +/- 1ms is a relatively low amount of jitter.
loss=0.1            # 0.1% loss is non-zero, but not extremely high.
duplicate=0.1       # 0.1% means duplicates are 1/10x as frequent as losses.
duration=30         # 30s is enough time to get consistent results (experimentally).
helper_dir=$(dirname $0)
netstack_opts=
disable_linux_gso=
num_client_threads=1

# Check for netem support.
lsmod_output=$(lsmod | grep sch_netem)
if [ "$?" != "0" ]; then
  echo "warning: sch_netem may not be installed." >&2
fi

while [ $# -gt 0 ]; do
  case "$1" in
    --client)
      client=true
      ;;
    --client_tcp_probe_file)
      shift
      netstack_opts="${netstack_opts} -client_tcp_probe_file=$1"
      ;;
    --server)
      server=true
      ;;
    --verbose)
      verbose=true
      ;;
    --gso)
      shift
      gso=$1
      ;;
    --swgso)
      swgso=true
      ;;
    --server_tcp_probe_file)
      shift
      netstack_opts="${netstack_opts} -server_tcp_probe_file=$1"
      ;;
    --ideal)
      mtu=1500            # Standard ethernet.
      latency=0           # No latency.
      latency_variation=0 # No jitter.
      loss=0              # No loss.
      duplicate=0         # No duplicates.
      ;;
    --mtu)
      shift
      [ "$#" -le 0 ] && echo "no mtu provided" && exit 1
      mtu=$1
      ;;
    --sack)
      netstack_opts="${netstack_opts} -sack"
      ;;
    --rack)
      netstack_opts="${netstack_opts} -rack"
      ;;
    --cubic)
      netstack_opts="${netstack_opts} -cubic"
      ;;
    --moderate-recv-buf)
      netstack_opts="${netstack_opts} -moderate_recv_buf"
      ;;
    --duration)
      shift
      [ "$#" -le 0 ] && echo "no duration provided" && exit 1
      duration=$1
      ;;
    --latency)
      shift
      [ "$#" -le 0 ] && echo "no latency provided" && exit 1
      latency=$1
      ;;
    --latency-variation)
      shift
      [ "$#" -le 0 ] && echo "no latency variation provided" && exit 1
      latency_variation=$1
      ;;
    --loss)
      shift
      [ "$#" -le 0 ] && echo "no loss probability provided" && exit 1
      loss=$1
      ;;
    --duplicate)
      shift
      [ "$#" -le 0 ] && echo "no duplicate provided" && exit 1
      duplicate=$1
      ;;
    --cpuprofile)
      shift
      netstack_opts="${netstack_opts} -cpuprofile=$1"
      ;;
    --memprofile)
      shift
      netstack_opts="${netstack_opts} -memprofile=$1"
      ;;
    --disable-linux-gso)
      disable_linux_gso=1
      ;;
    --num-client-threads)
      shift
      num_client_threads=$1
      ;;
    --helpers)
      shift
      [ "$#" -le 0 ] && echo "no helper dir provided" && exit 1
      helper_dir=$1
      ;;
    *)
      echo "usage: $0 [options]"
      echo "options:"
      echo " --help               show this message"
      echo " --verbose             verbose output"
      echo " --client              use netstack as the client"
      echo " --ideal               reset all network emulation"
      echo " --server              use netstack as the server"
      echo " --mtu                 set the mtu (bytes)"
      echo " --sack                enable SACK support"
      echo " --rack                enable RACK support"
      echo " --moderate-recv-buf   enable TCP receive buffer auto-tuning"
      echo " --cubic               enable CUBIC congestion control for Netstack"
      echo " --duration            set the test duration (s)"
      echo " --latency             set the latency (ms)"
      echo " --latency-variation   set the latency variation"
      echo " --loss                set the loss probability (%)"
      echo " --duplicate           set the duplicate probability (%)"
      echo " --helpers             set the helper directory"
      echo " --num-client-threads  number of parallel client threads to run"
      echo " --disable-linux-gso   disable segmentation offload in the Linux network stack"
      echo ""
      echo "The output of the script will be:"
      echo "  <throughput> <client-cpu-usage> <server-cpu-usage>"
      exit 1
  esac
  shift
done

if [ ${verbose} == "true" ]; then
  set -x
fi

# Latency needs to be halved, since it's applied on both ways.
half_latency=$(echo ${latency}/2 | bc -l | awk '{printf "%1.2f", $0}')
half_loss=$(echo ${loss}/2 | bc -l | awk '{printf "%1.6f", $0}')
half_duplicate=$(echo ${duplicate}/2 | bc -l | awk '{printf "%1.6f", $0}')
helper_dir=${helper_dir#$(pwd)/} # Use relative paths.
proxy_binary=${helper_dir}/tcp_proxy
nsjoin_binary=${helper_dir}/nsjoin

if [ ! -e ${proxy_binary} ]; then
  echo "Could not locate ${proxy_binary}, please make sure you've built the binary"
  exit 1
fi

if [ ! -e ${nsjoin_binary} ]; then
  echo "Could not locate ${nsjoin_binary}, please make sure you've built the binary"
  exit 1
fi

if [ $(echo ${latency_variation} | awk '{printf "%1.2f", $0}') != "0.00" ]; then
  # As long as there's some jitter, we use the paretonormal distribution.
  # This will preserve the minimum RTT, but add a realistic amount of jitter to
  # the connection and cause re-ordering, etc. The regular pareto distribution
  # appears to add an unreasonable level of delay (we want only small spikes).
  distribution="distribution paretonormal"
else
  distribution=""
fi

# Client proxy that will listen on the client's iperf target and forward
# traffic using the host networking stack.
client_args="${proxy_binary} -port ${proxy_port} -forward ${server_proxy_addr}:${proxy_port}"
if ${client}; then
  # Client proxy that will listen on the client's iperf target
  # and forward traffic using netstack.
  client_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -client \\
      -mtu ${mtu} -iface client.0 -addr ${client_proxy_addr} -mask ${mask} \\
      -forward ${server_proxy_addr}:${proxy_port} -gso=${gso} -swgso=${swgso}"
fi

# Server proxy that will listen on the proxy port and forward to the server's
# iperf server using the host networking stack.
server_args="${proxy_binary} -port ${proxy_port} -forward ${server_addr}:${iperf_port}"
if ${server}; then
  # Server proxy that will listen on the proxy port and forward to the server's
  # iperf server using netstack.
  server_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -server \\
      -mtu ${mtu} -iface server.0 -addr ${server_proxy_addr} -mask ${mask} \\
      -forward ${server_addr}:${iperf_port} -gso=${gso} -swgso=${swgso}"
fi

# Specify loss and duplicate parameters only if they are non-zero.
loss_opt=""
if [ "$(echo $half_loss | bc -q)" != "0" ]; then
  loss_opt="loss random ${half_loss}%"
fi
duplicate_opt=""
if [ "$(echo $half_duplicate | bc -q)" != "0" ]; then
  duplicate_opt="duplicate ${half_duplicate}%"
fi

exec unshare -U -m -n -r -f -p --mount-proc /bin/bash << EOF
set -e -m

if [ ${verbose} == "true" ]; then
  set -x
fi

mount -t tmpfs netstack-bench /tmp

# We may have reset the path in the unshare if the shell loaded some public
# profiles. Ensure that tools are discoverable via the parent's PATH.
export PATH=${PATH}

# Add client, server interfaces.
ip link add client.0 type veth peer name client.1
ip link add server.0 type veth peer name server.1

# Add network emulation devices.
ip link add wan.0 type veth peer name wan.1
ip link set wan.0 up
ip link set wan.1 up

# Enroll on the bridge.
ip link add name br0 type bridge
ip link add name br1 type bridge
ip link set client.1 master br0
ip link set server.1 master br1
ip link set wan.0 master br0
ip link set wan.1 master br1
ip link set br0 up
ip link set br1 up

# Set the MTU appropriately.
ip link set client.0 mtu ${mtu}
ip link set server.0 mtu ${mtu}
ip link set wan.0 mtu ${mtu}
ip link set wan.1 mtu ${mtu}

# Add appropriate latency, loss and duplication.
#
# This is added in at the point of bridge connection.
for device in wan.0 wan.1; do
  # NOTE: We don't support a loss correlation as testing has shown that it
  # actually doesn't work. The man page actually has a small comment about this
  # "It is also possible to add a correlation, but this option is now deprecated
  # due to the noticed bad behavior." For more information see netem(8).
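  # The halved values (half_latency, half_loss, half_duplicate) computed above
  # are used here because this qdisc is attached to both wan.0 and wan.1, one
  # per direction, so a full round trip sees approximately the configured
  # latency, loss and duplication.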
  tc qdisc add dev \$device root netem \\
    delay ${half_latency}ms ${latency_variation}ms ${distribution} \\
    ${loss_opt} ${duplicate_opt}
done

# Start a client proxy.
touch /tmp/client.netns
unshare -n mount --bind /proc/self/ns/net /tmp/client.netns

# Move the endpoint into the namespace.
while ip link | grep client.0 > /dev/null; do
  ip link set dev client.0 netns /tmp/client.netns
done

if ! ${client}; then
  # Only add the address to NIC if netstack is not in use. Otherwise the host
  # will also process the inbound SYN and send a RST back.
  ${nsjoin_binary} /tmp/client.netns ip addr add ${client_proxy_addr}/${mask} dev client.0
fi

# Start a server proxy.
touch /tmp/server.netns
unshare -n mount --bind /proc/self/ns/net /tmp/server.netns

# Move the endpoint into the namespace.
while ip link | grep server.0 > /dev/null; do
  ip link set dev server.0 netns /tmp/server.netns
done

if ! ${server}; then
  # Only add the address to NIC if netstack is not in use. Otherwise the host
  # will also process the inbound SYN and send a RST back.
  ${nsjoin_binary} /tmp/server.netns ip addr add ${server_proxy_addr}/${mask} dev server.0
fi

# Add client and server addresses, and bring everything up.
${nsjoin_binary} /tmp/client.netns ip addr add ${client_addr}/${mask} dev client.0
${nsjoin_binary} /tmp/server.netns ip addr add ${server_addr}/${mask} dev server.0
if [ "${disable_linux_gso}" == "1" ]; then
  ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 tso off
  ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gro off
  ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gso off
  ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 tso off
  ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gso off
  ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gro off
fi
${nsjoin_binary} /tmp/client.netns ip link set client.0 up
${nsjoin_binary} /tmp/client.netns ip link set lo up
${nsjoin_binary} /tmp/server.netns ip link set server.0 up
${nsjoin_binary} /tmp/server.netns ip link set lo up
ip link set dev client.1 up
ip link set dev server.1 up

${nsjoin_binary} /tmp/client.netns ${client_args} &
client_pid=\$!
${nsjoin_binary} /tmp/server.netns ${server_args} &
server_pid=\$!

# Start the iperf server.
${nsjoin_binary} /tmp/server.netns iperf -p ${iperf_port} -s >&2 &
iperf_pid=\$!

# Show traffic information.
if ! ${client} && ! ${server}; then
  ${nsjoin_binary} /tmp/client.netns ping -c 100 -i 0.001 -W 1 ${server_addr} >&2 || true
fi

results_file=\$(mktemp)
function cleanup {
  rm -f \$results_file
  kill -TERM \$client_pid
  kill -TERM \$server_pid
  wait \$client_pid
  wait \$server_pid
  kill -9 \$iperf_pid 2>/dev/null
}

# Allow failure from this point.
set +e
trap cleanup EXIT

# Run the benchmark, recording the results file.
while ${nsjoin_binary} /tmp/client.netns iperf \\
    -p ${proxy_port} -c ${client_addr} -t ${duration} -f m -P ${num_client_threads} 2>&1 \\
    | tee \$results_file \\
    | grep "connect failed" >/dev/null; do
  sleep 0.1 # Wait for all services.
done

# Unlink all relevant devices from the bridge. This is because when the bridge
# is deleted, the kernel may hang.
# It appears that this problem is fixed in upstream commit
# 1ce5cce895309862d2c35d922816adebe094fe4a.
ip link set client.1 nomaster
ip link set server.1 nomaster
ip link set wan.0 nomaster
ip link set wan.1 nomaster

# Emit raw results.
cat \$results_file >&2

# Emit a useful result (final throughput).
mbits=\$(grep Mbits/sec \$results_file \\
    | sed -n -e 's/^.*[[:space:]]\\([[:digit:]]\\+\\(\\.[[:digit:]]\\+\\)\\?\\)[[:space:]]*Mbits\\/sec.*/\\1/p')
client_cpu_ticks=\$(cat /proc/\$client_pid/stat \\
    | awk '{print (\$14+\$15);}')
server_cpu_ticks=\$(cat /proc/\$server_pid/stat \\
    | awk '{print (\$14+\$15);}')
ticks_per_sec=\$(getconf CLK_TCK)
client_cpu_load=\$(bc -l <<< \$client_cpu_ticks/\$ticks_per_sec/${duration})
server_cpu_load=\$(bc -l <<< \$server_cpu_ticks/\$ticks_per_sec/${duration})
echo \$mbits \$client_cpu_load \$server_cpu_load
EOF
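
# Example invocations (illustrative only; all flags shown are parsed above):
#
#   # Baseline: both proxies use the host network stack, default link emulation.
#   ./tcp_benchmark.sh
#
#   # Netstack as the client with SACK and CUBIC enabled, over an ideal link.
#   ./tcp_benchmark.sh --client --sack --cubic --ideal
#
# The last line printed is "<throughput> <client-cpu-usage> <server-cpu-usage>",
# where throughput is in Mbits/sec (iperf is run with -f m) and each CPU usage
# figure is roughly the fraction of one CPU consumed by that proxy over the run
# (utime+stime ticks divided by CLK_TCK and the test duration).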