#!/bin/bash
#
# Netperf performance test between a client and a server pod deployed on
# GCE workers, repeated under progressively relaxed cilium endpoint
# configurations (debug off, conntrack accounting off, policy off).
# Only runs when both BENCHMARK and GCE are set in the environment.

dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "${dir}/helpers.bash"
# dir might have been overwritten by helpers.bash
dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

TEST_NAME=$(get_filename_without_extension "$0")
LOGS_DIR="${dir}/cilium-files/${TEST_NAME}/logs"
redirect_debug_logs "${LOGS_DIR}"

set -ex

NETPERF_IMAGE="tgraf/nettools"
TEST_TIME=30
SERVER_LABEL="id.server"
CLIENT_LABEL="id.client"
SERVER_NAME="server"
CLIENT_NAME="client"
# "-P 0" suppresses netperf result headers; empty when HEADERS_OFF is unset.
HEADERS=${HEADERS_OFF:+"-P 0"}

# Only run these tests if BENCHMARK=1 and GCE=1 has been set
if [ -z "${BENCHMARK}" ] || [ -z "${GCE}" ]; then
  exit 0
fi

# Render the client/server k8s deployment JSON from their .sed templates,
# substituting image name, pod names and labels.
function create_k8s_files {
  sed -e "s+NETPERF_IMAGE+${NETPERF_IMAGE}+" \
      -e "s+CLIENT_NAME+${CLIENT_NAME}+" \
      -e "s+CLIENT_LABEL+${CLIENT_LABEL}+" \
      ./gce-deployment/client.json.sed > ./gce-deployment/client.json
  sed -e "s+NETPERF_IMAGE+${NETPERF_IMAGE}+" \
      -e "s+SERVER_NAME+${SERVER_NAME}+" \
      -e "s+SERVER_LABEL+${SERVER_LABEL}+" \
      ./gce-deployment/server.json.sed > ./gce-deployment/server.json
}

create_k8s_files

# Best-effort removal of both deployments; failures are ignored so cleanup
# can run even when creation only partially succeeded.
function cleanup_k8s {
  kubectl delete -f ./gce-deployment/client.json || true
  kubectl delete -f ./gce-deployment/server.json || true
}

trap cleanup_k8s EXIT

kubectl create -f ./gce-deployment/client.json
kubectl create -f ./gce-deployment/server.json

wait_for_running_pod "${CLIENT_NAME}"
wait_for_running_pod "${SERVER_NAME}"

echo "Getting Client and Server IPv6, IPv4 and ID from containers"

server_pod=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "${SERVER_NAME}")
client_pod=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "${CLIENT_NAME}")

# Node each pod landed on, then the cilium agent pod running on that node.
server_worker=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep "${SERVER_NAME}" | cut -d' ' -f2)
client_worker=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep "${CLIENT_NAME}" | cut -d' ' -f2)

server_cilium=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep cilium | grep "${server_worker}" | cut -d' ' -f1)
client_cilium=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep cilium | grep "${client_worker}" | cut -d' ' -f1)

echo "..."

# Restore debug/notification options on both cilium agents, then delete the
# deployments. Replaces the earlier cleanup_k8s trap once the agent pod
# names are known (bash keeps only the last EXIT trap).
function cleanup_cilium {
  cleanup_k8s

  # Intentionally unquoted: iterate over the two agent pod names.
  for line in ${server_cilium} ${client_cilium}; do
    kubectl exec -i "${line}" -- cilium config DropNotification=true TraceNotification=true Debug=true
  done
}

trap cleanup_cilium EXIT

# Pod IPv6/IPv4 addresses (global scope, prefix stripped) and the cilium
# endpoint IDs of both pods.
CLIENT_IP=$(kubectl exec "${client_pod}" -- ip -6 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
CLIENT_IP4=$(kubectl exec "${client_pod}" -- ip -4 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
CLIENT_ID=$(kubectl exec "${client_cilium}" -- cilium endpoint list | grep "$CLIENT_LABEL" | awk '{ print $1}')
SERVER_IP=$(kubectl exec "${server_pod}" -- ip -6 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
SERVER_IP4=$(kubectl exec "${server_pod}" -- ip -4 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
SERVER_ID=$(kubectl exec "${server_cilium}" -- cilium endpoint list | grep "$SERVER_LABEL" | awk '{ print $1}')

HOST_IP=$(echo "$SERVER_IP" | sed -e 's/:[0-9a-f]\{4\}$/:ffff/')
SERVER_DEV=$(kubectl exec "${server_cilium}" -- cilium endpoint get "$SERVER_ID" | grep interface-name | awk '{print $2}' | sed 's/"//g' | sed 's/,$//')
NODE_MAC=$(kubectl exec "${server_cilium}" -- cilium endpoint get "$SERVER_ID" | grep host-mac | awk '{print $2}' | sed 's/"//g' | sed 's/,$//')
# NOTE(review): 'grep mac' also matches the "host-mac" line above; this
# relies on the endpoint-get output ordering/first-match behavior to yield
# the lxc MAC — confirm against `cilium endpoint get` output format.
LXC_MAC=$(kubectl exec "${server_cilium}" -- cilium endpoint get "$SERVER_ID" | grep mac | awk '{print $2}' | sed 's/"//g' | sed 's/,$//')

echo "... Done"

# Allow traffic from the client label to the server label.
cat <<EOF | kubectl exec -i "${server_cilium}" -- cilium -D policy import -
[{
    "endpointSelector": {"matchLabels":{"k8s:${SERVER_LABEL}":""}},
    "ingress": [{
        "fromEndpoints": [
	    {"matchLabels":{"k8s:${CLIENT_LABEL}":""}}
	]
    }]
}]
EOF

# Run the full netperf suite from the client pod against the server:
# TCP_STREAM, TCP_SENDFILE, UDP_STREAM, TCP_RR and UDP_RR over IPv6, plus
# the IPv4 variants when the server has an IPv4 address. Aborts on any
# unreachable endpoint. $HEADERS is intentionally unquoted: it expands to
# the two words "-P 0" (or nothing).
function perf_test() {
  kubectl exec "${client_pod}" -- netperf -6 $HEADERS -l "$TEST_TIME" -t TCP_STREAM -H "$SERVER_IP" || {
    abort "Error: Unable to reach netperf TCP endpoint"
  }

  if [ -n "$SERVER_IP4" ]; then
    kubectl exec "${client_pod}" -- netperf -4 $HEADERS -l "$TEST_TIME" -t TCP_STREAM -H "$SERVER_IP4" || {
      abort "Error: Unable to reach netperf TCP endpoint"
    }
  fi

  kubectl exec "${client_pod}" -- netperf -6 $HEADERS -l "$TEST_TIME" -t TCP_SENDFILE -H "$SERVER_IP" || {
    abort "Error: Unable to reach netperf TCP endpoint"
  }

  if [ -n "$SERVER_IP4" ]; then
    kubectl exec "${client_pod}" -- netperf -4 $HEADERS -l "$TEST_TIME" -t TCP_SENDFILE -H "$SERVER_IP4" || {
      abort "Error: Unable to reach netperf TCP endpoint"
    }
  fi

  # "-- -R1" routes UDP through the stack rather than rejecting off-link.
  kubectl exec "${client_pod}" -- netperf -6 $HEADERS -l "$TEST_TIME" -t UDP_STREAM -H "$SERVER_IP" -- -R1 || {
    abort "Error: Unable to reach netperf UDP endpoint"
  }

  if [ -n "$SERVER_IP4" ]; then
    kubectl exec "${client_pod}" -- netperf -4 $HEADERS -l "$TEST_TIME" -t UDP_STREAM -H "$SERVER_IP4" -- -R1 || {
      abort "Error: Unable to reach netperf UDP endpoint"
    }
  fi

  # Small-message sendfile variant.
  kubectl exec "${client_pod}" -- netperf -6 $HEADERS -l "$TEST_TIME" -t TCP_SENDFILE -H "$SERVER_IP" -- -m 256 || {
    abort "Error: Unable to reach netperf TCP endpoint"
  }

  # 8 parallel netperf streams.
  kubectl exec "${client_pod}" -- super_netperf 8 -6 -l "$TEST_TIME" -t TCP_SENDFILE -H "$SERVER_IP" || {
    abort "Error: Unable to reach netperf TCP endpoint"
  }

  if [ -n "$SERVER_IP4" ]; then
    kubectl exec "${client_pod}" -- super_netperf 8 -4 -l "$TEST_TIME" -t TCP_SENDFILE -H "$SERVER_IP4" || {
      abort "Error: Unable to reach netperf TCP endpoint"
    }
  fi

  kubectl exec "${client_pod}" -- netperf -6 $HEADERS -l "$TEST_TIME" -t TCP_RR -H "$SERVER_IP" || {
    abort "Error: Unable to reach netperf TCP endpoint"
  }

  if [ -n "$SERVER_IP4" ]; then
    kubectl exec "${client_pod}" -- netperf -4 $HEADERS -l "$TEST_TIME" -t TCP_RR -H "$SERVER_IP4" || {
      abort "Error: Unable to reach netperf TCP endpoint"
    }
  fi

  # FIXME
  # kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t TCP_CRR -H $SERVER_IP || {
  #   abort "Error: Unable to reach netperf TCP endpoint"
  # }
  #
  # if [ $SERVER_IP4 ]; then
  #   kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t TCP_CRR -H $SERVER_IP4 || {
  #     abort "Error: Unable to reach netperf TCP endpoint"
  #   }
  # fi

  kubectl exec "${client_pod}" -- netperf -6 $HEADERS -l "$TEST_TIME" -t UDP_RR -H "$SERVER_IP" -- -R1 || {
    abort "Error: Unable to reach netperf UDP endpoint"
  }

  if [ -n "$SERVER_IP4" ]; then
    kubectl exec "${client_pod}" -- netperf -4 $HEADERS -l "$TEST_TIME" -t UDP_RR -H "$SERVER_IP4" -- -R1 || {
      abort "Error: Unable to reach netperf UDP endpoint"
    }
  fi
}

# Baseline run: debug and drop/trace notifications disabled on both agents
# and both endpoints (re-enabled by the EXIT trap).
kubectl exec "${server_cilium}" -- cilium config DropNotification=false TraceNotification=false Debug=false
kubectl exec "${client_cilium}" -- cilium config DropNotification=false TraceNotification=false Debug=false
kubectl exec "${server_cilium}" -- cilium endpoint config "$SERVER_ID" DropNotification=false TraceNotification=false Debug=false
kubectl exec "${client_cilium}" -- cilium endpoint config "$CLIENT_ID" DropNotification=false TraceNotification=false Debug=false
perf_test

# Second run: conntrack accounting disabled.
kubectl exec "${server_cilium}" -- cilium endpoint config "$SERVER_ID" ConntrackAccounting=false
kubectl exec "${client_cilium}" -- cilium endpoint config "$CLIENT_ID" ConntrackAccounting=false
perf_test

# FIXME
echo "Conntrack=false test won't be run!"
#kubectl exec ${server_cilium} -- cilium endpoint config $SERVER_ID Conntrack=false
#kubectl exec ${client_cilium} -- cilium endpoint config $CLIENT_ID Conntrack=false
#perf_test

# Final run: ingress/egress policy enforcement disabled on both endpoints.
kubectl exec "${server_cilium}" -- cilium endpoint config "$SERVER_ID" IngressPolicy=false
kubectl exec "${server_cilium}" -- cilium endpoint config "$SERVER_ID" EgressPolicy=false
kubectl exec "${client_cilium}" -- cilium endpoint config "$CLIENT_ID" IngressPolicy=false
kubectl exec "${client_cilium}" -- cilium endpoint config "$CLIENT_ID" EgressPolicy=false
perf_test

kubectl exec "${server_cilium}" -- cilium policy delete "${SERVER_LABEL}"