go.ligato.io/vpp-agent/v3@v3.5.0/tests/perf/grpc-perf/main.go

// Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"sync"
	"time"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/namsral/flag"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/sirupsen/logrus"
	"go.ligato.io/cn-infra/v2/agent"
	"go.ligato.io/cn-infra/v2/infra"
	"go.ligato.io/cn-infra/v2/logging"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/encoding/protojson"

	"go.ligato.io/vpp-agent/v3/pkg/version"
	"go.ligato.io/vpp-agent/v3/proto/ligato/configurator"
	"go.ligato.io/vpp-agent/v3/proto/ligato/vpp"
	interfaces "go.ligato.io/vpp-agent/v3/proto/ligato/vpp/interfaces"
	ipsec "go.ligato.io/vpp-agent/v3/proto/ligato/vpp/ipsec"
	vpp_l3 "go.ligato.io/vpp-agent/v3/proto/ligato/vpp/l3"
)

var (
	reg              = prometheus.NewRegistry()
	grpcMetrics      = grpc_prometheus.NewClientMetrics()
	perfTestSettings *prometheus.GaugeVec
)

func init() {
	flag.Parse()

	grpcMetrics.EnableClientHandlingTimeHistogram()
	reg.MustRegister(grpcMetrics)
	perfTestSettings = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "ligato",
		Subsystem: "perf_test",
		Name:      "client_settings",
		Help:      "Settings of the perf test client.",
		ConstLabels: map[string]string{
			"num_clients": fmt.Sprint(*numClients),
			"num_tunnels": fmt.Sprint(*numTunnels),
			"num_per_req": fmt.Sprint(*numPerRequest),
		},
	}, []string{"start_time"})
	reg.MustRegister(perfTestSettings)
}

var (
	address        = flag.String("address", "127.0.0.1:9111", "address of GRPC server")
	socketType     = flag.String("socket-type", "tcp", "socket type [tcp, tcp4, tcp6, unix, unixpacket]")
	numClients     = flag.Int("clients", 1, "number of concurrent grpc clients")
	numTunnels     = flag.Int("tunnels", 100, "number of tunnels to stress per client")
	numPerRequest  = flag.Int("numperreq", 1, "number of tunnels/routes per grpc request")
	withIPs        = flag.Bool("with-ips", false, "configure IP address for each tunnel on memif at the end")
	debug          = flag.Bool("debug", false, "turn on debug dump")
	dumpMetrics    = flag.Bool("dumpmetrics", false, "dump metrics before exit")
	timeout        = flag.Uint("timeout", 300, "timeout for requests (in seconds)")
	reportProgress = flag.Uint("progress", 20, "percent of progress to report")

	dialTimeout = time.Second * 3
	reqTimeout  = time.Second * 300
)

func main() {
	if *debug {
		logging.DefaultLogger.SetLevel(logging.DebugLevel)
	}

	perfTestSettings.WithLabelValues(time.Now().Format(time.Stamp)).Set(1)

	go serveMetrics()

	quit := make(chan struct{})

	ep := NewGRPCStressPlugin()

	ver, rev, date := version.Data()
	a := agent.NewAgent(
		agent.AllPlugins(ep),
		agent.QuitOnClose(quit),
		agent.Version(ver, date, rev),
	)

	if err := a.Start(); err != nil {
		log.Fatalln(err)
	}

	ep.setupInitial()
	ep.runAllClients()

	if err := a.Stop(); err != nil {
		log.Fatalln(err)
	}

	if *dumpMetrics {
		resp, err := http.Get("http://localhost:9094/metrics")
		if err != nil {
			log.Fatalln(err)
		}
		b, err := io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Println("----------------------")
		fmt.Println("-> CLIENT METRICS")
		fmt.Println("----------------------")
		fmt.Print(string(b))
		fmt.Println("----------------------")
	}

	// Keep the process (and the metrics endpoint) alive briefly before exiting.
	time.Sleep(time.Second * 5)
}

// serveMetrics exposes the registered Prometheus metrics over HTTP on port 9094.
func serveMetrics() {
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})

	// Create an HTTP server for Prometheus.
	httpServer := &http.Server{
		Handler: h,
		Addr:    fmt.Sprintf(":%d", 9094),
	}

	// Start the HTTP server for Prometheus.
	if err := httpServer.ListenAndServe(); err != nil {
		log.Println("Unable to start HTTP server:", err)
	}
}

// GRPCStressPlugin uses the configurator gRPC client to create IPsec tunnels
// and routes for stress testing.
type GRPCStressPlugin struct {
	infra.PluginName
	Log *logrus.Logger

	conn *grpc.ClientConn

	wg sync.WaitGroup
}

// NewGRPCStressPlugin creates the stress-test plugin with a colored text logger.
func NewGRPCStressPlugin() *GRPCStressPlugin {
	p := &GRPCStressPlugin{}
	p.SetName("grpc-stress-test-client")
	p.Log = logrus.New()
	p.Log.SetFormatter(&logrus.TextFormatter{
		ForceColors:               true,
		EnvironmentOverrideColors: true,
	})
	return p
}

// Init implements the plugin interface; there is nothing to initialize.
func (p *GRPCStressPlugin) Init() error {
	return nil
}

// Close closes the shared gRPC connection, if any.
func (p *GRPCStressPlugin) Close() error {
	if p.conn != nil {
		return p.conn.Close()
	}
	return nil
}

// dialer returns a context dialer that connects to the configured address
// over the given socket type (tcp, unix, ...) with a dial timeout.
func dialer(socket, address string, timeoutVal time.Duration) func(context.Context, string) (net.Conn, error) {
	return func(ctx context.Context, addr string) (net.Conn, error) {
		// Dial with timeout; the addr passed in by gRPC is ignored in favor
		// of the configured socket type and address.
		return net.DialTimeout(socket, address, timeoutVal)
	}
}

// setupInitial dials the gRPC server, optionally dumps the current state,
// and configures the base memif interfaces referenced by the tunnels.
func (p *GRPCStressPlugin) setupInitial() {
	// The target passed to grpc.Dial is only a placeholder; the custom dialer
	// determines the actual socket type and address.
	conn, err := grpc.Dial("unix",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(dialer(*socketType, *address, dialTimeout)),
		grpc.WithUnaryInterceptor(grpcMetrics.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(grpcMetrics.StreamClientInterceptor()),
	)
	if err != nil {
		log.Fatal(err)
	}
	p.conn = conn

	reqTimeout = time.Second * time.Duration(*timeout)

	client := configurator.NewConfiguratorServiceClient(conn)

	if *debug {
		p.Log.Infof("Requesting get..")
		cfg, err := client.Get(context.Background(), &configurator.GetRequest{})
		if err != nil {
			log.Fatalln(err)
		}
		out := protojson.Format(cfg)
		fmt.Printf("Config:\n %+v\n", out)

		p.Log.Infof("Requesting dump..")
		dump, err := client.Dump(context.Background(), &configurator.DumpRequest{})
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Dump:\n %+v\n", protojson.Format(dump))
	}

	time.Sleep(time.Second * 1)

	// use the client to create the red/black interfaces
	// that each tunnel will reference
	p.runGRPCCreateRedBlackMemifs(client)
}

// runGRPCCreateRedBlackMemifs creates the initial red and black memifs that kiknos uses.
// The IPsec tunnels reference only the red memif; the black one is not needed yet,
// but it may be later.
func (p *GRPCStressPlugin) runGRPCCreateRedBlackMemifs(client configurator.ConfiguratorServiceClient) {
	p.Log.Infof("Configuring memif interfaces..")

	memIFRed := &interfaces.Interface{
		Name:        "red",
		Type:        interfaces.Interface_MEMIF,
		IpAddresses: []string{"100.0.0.1/24"},
		Mtu:         9200,
		Enabled:     true,
		Link: &interfaces.Interface_Memif{
			Memif: &interfaces.MemifLink{
				Id:             1,
				Master:         false,
				SocketFilename: "/var/run/memif_k8s-master.sock",
			},
		},
	}
	memIFBlack := &interfaces.Interface{
		Name:        "black",
		Type:        interfaces.Interface_MEMIF,
		IpAddresses: []string{"192.168.20.1/24"},
		Mtu:         9200,
		Enabled:     true,
		Link: &interfaces.Interface_Memif{
			Memif: &interfaces.MemifLink{
				Id:             2,
				Master:         false,
				SocketFilename: "/var/run/memif_k8s-master.sock",
			},
		},
	}
	ifaces := []*interfaces.Interface{memIFRed, memIFBlack}

	ctx, cancel := context.WithTimeout(context.Background(), reqTimeout)
	_, err := client.Update(ctx, &configurator.UpdateRequest{
		Update: &configurator.Config{
			VppConfig: &vpp.ConfigData{
				Interfaces: ifaces,
			},
		},
		FullResync: true,
	})
	if err != nil {
		log.Fatalln(err)
	}
	cancel()

	if *debug {
		p.Log.Infof("Requesting get..")

		cfg, err := client.Get(context.Background(), &configurator.GetRequest{})
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Config:\n %+v\n", protojson.Format(cfg))
	}
}

// runAllClients launches the configured number of concurrent clients and
// reports the aggregate throughput once they all finish.
func (p *GRPCStressPlugin) runAllClients() {
	p.Log.Infof("----------------------------------------")
	p.Log.Infof(" SETTINGS:")
	p.Log.Infof("----------------------------------------")
	p.Log.Infof(" -> Clients: %d", *numClients)
	p.Log.Infof(" -> Tunnels per client: %d", *numTunnels)
	p.Log.Infof(" -> Tunnels per request: %d", *numPerRequest)
	p.Log.Infof("----------------------------------------")
	p.Log.Infof("Launching all clients..")

	t := time.Now()

	p.wg.Add(*numClients)
	for i := 0; i < *numClients; i++ {
		// All clients share the single gRPC connection; the original
		// per-client connection setup is kept below for reference.
		/*conn, err := grpc.Dial("unix",
			grpc.WithInsecure(),
			grpc.WithDialer(dialer(*socketType, *address, dialTimeout)),
		)
		if err != nil {
			log.Fatal(err)
		}
		p.conns = append(p.conns, conn)
		client := configurator.NewConfiguratorServiceClient(p.conns[i])*/

		client := configurator.NewConfiguratorServiceClient(p.conn)

		go p.runGRPCStressCreate(i, client, *numTunnels)
	}

	p.Log.Debugf("Waiting for clients..")

	p.wg.Wait()

	took := time.Since(t)
	perSec := float64((*numTunnels)*(*numClients)) / took.Seconds()

	p.Log.Infof("All clients done!")
	p.Log.Infof("========================================")
	p.Log.Infof(" RESULTS:")
	p.Log.Infof("========================================")
	p.Log.Infof(" Elapsed: %.2f sec", took.Seconds())
	p.Log.Infof(" Average: %.1f tunnels/sec", perSec)
	p.Log.Infof("========================================")

	/*for i := 0; i < *numClients; i++ {
		if err := p.conns[i].Close(); err != nil {
			log.Fatal(err)
		}
	}*/

	if *debug {
		client := configurator.NewConfiguratorServiceClient(p.conn)

		time.Sleep(time.Second * 5)
		p.Log.Infof("Requesting get..")

		cfg, err := client.Get(context.Background(), &configurator.GetRequest{})
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Config:\n %+v\n", protojson.Format(cfg))

		time.Sleep(time.Second * 5)
		p.Log.Infof("Requesting dump..")

		dump, err := client.Dump(context.Background(), &configurator.DumpRequest{})
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Dump:\n %+v\n", protojson.Format(dump))
	}
}

// runGRPCStressCreate creates an IPsec tunnel and a route for each tunnel ID,
// emulating what strongSwan does for each remote warrior.
func (p *GRPCStressPlugin) runGRPCStressCreate(clientId int, client configurator.ConfiguratorServiceClient, numTunnels int) {
	defer p.wg.Done()

	p.Log.Debugf("Creating %d tunnels/routes for client %d", numTunnels, clientId)

	startTime := time.Now()

	ips := []string{"10.0.0.1/24"}

	report := 0.0
	lastNumTunnels := 0
	lastReport := startTime

	for tunNum := 0; tunNum < numTunnels; {
		var ifaces []*interfaces.Interface
		var routes []*vpp_l3.Route

		// Batch up to numPerRequest tunnels/routes into a single update request.
		for req := 0; req < *numPerRequest; req++ {
			if tunNum == numTunnels {
				break
			}

			tunID := clientId*numTunnels + tunNum
			tunNum++

			ipsecTunnelName := fmt.Sprintf("ipsec-%d", tunID)

			ipPart0 := 100 + (uint32(tunID)>>16)&0xFF
			ipPart := gen2octets(uint32(tunID))
			localIP := fmt.Sprintf("%d.%s.1", ipPart0, ipPart)
			remoteIP := fmt.Sprintf("%d.%s.254", ipPart0, ipPart)

			ips = append(ips, localIP+"/24")

			ipsecInfo := &interfaces.Interface_Ipsec{
				Ipsec: &interfaces.IPSecLink{
					LocalIp:         localIP,
					RemoteIp:        remoteIP,
					LocalSpi:        200000 + uint32(tunID),
					RemoteSpi:       100000 + uint32(tunID),
					CryptoAlg:       ipsec.CryptoAlg_AES_CBC_256,
					LocalCryptoKey:  "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF",
					RemoteCryptoKey: "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF",
					IntegAlg:        ipsec.IntegAlg_SHA_512_256,
					LocalIntegKey:   "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF",
					RemoteIntegKey:  "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF",
				},
			}
			ipsecTunnel := &interfaces.Interface{
				Name:    ipsecTunnelName,
				Type:    interfaces.Interface_IPSEC_TUNNEL,
				Enabled: true,
				Mtu:     9000,
				Unnumbered: &interfaces.Interface_Unnumbered{
					InterfaceWithIp: "red",
				},
				Link: ipsecInfo,
			}

			route := &vpp_l3.Route{
				DstNetwork:        "10." + gen3octets(uint32(tunID)) + "/32",
				NextHopAddr:       remoteIP,
				OutgoingInterface: ipsecTunnelName,
			}

			// p.Log.Infof("Creating %s ... client: %d, tunNum: %d", ipsecTunnelName, clientId, tunNum)

			ifaces = append(ifaces, ipsecTunnel)
			routes = append(routes, route)
		}

		// p.Log.Infof("Creating %d ifaces & %d routes", len(ifaces), len(routes))

		_, err := client.Update(context.Background(), &configurator.UpdateRequest{
			Update: &configurator.Config{
				VppConfig: &vpp.ConfigData{
					Interfaces: ifaces,
					Routes:     routes,
				},
			},
		})
		if err != nil {
			log.Fatalf("Error creating tun/route: clientId/tun=%d/%d, err: %s", clientId, tunNum, err)
		}

		progress := (float64(tunNum) / float64(numTunnels)) * 100
		if uint(progress-report) >= *reportProgress {
			tunNumReport := tunNum - lastNumTunnels

			took := time.Since(lastReport)
			perSec := float64(tunNumReport) / took.Seconds()

			p.Log.Infof("client #%d - progress % 3.0f%% -> %d tunnels took %.3fs (%.1f tunnels/sec)",
				clientId, progress, tunNumReport, took.Seconds(), perSec)

			report = progress
			lastReport = time.Now()
			lastNumTunnels = tunNum
		}
	}

	if *withIPs {
		p.Log.Infof("updating %d ip addresses on memif", len(ips))

		memIFRed := &interfaces.Interface{
			Name:        "red",
			Type:        interfaces.Interface_MEMIF,
			IpAddresses: ips,
			Mtu:         9000,
			Enabled:     true,
			Link: &interfaces.Interface_Memif{
				Memif: &interfaces.MemifLink{
					Id:             1,
					Master:         false,
					SocketFilename: "/var/run/memif_k8s-master.sock",
				},
			},
		}

		ctx, cancel := context.WithTimeout(context.Background(), reqTimeout)
		_, err := client.Update(ctx, &configurator.UpdateRequest{
			Update: &configurator.Config{
				VppConfig: &vpp.ConfigData{
					Interfaces: []*interfaces.Interface{memIFRed},
				},
			},
		})
		cancel()
		if err != nil {
			log.Fatalln(err)
		}
	}

	took := time.Since(startTime)
	perSec := float64(numTunnels) / took.Seconds()

	p.Log.Infof("client #%d done => %d tunnels took %.3fs (%.1f tunnels/sec)",
		clientId, numTunnels, took.Seconds(), perSec)
}

// gen3octets renders the lowest three octets of num as dotted decimal (e.g. "1.2.3").
func gen3octets(num uint32) string {
	return fmt.Sprintf("%d.%d.%d", (num>>16)&0xFF, (num>>8)&0xFF, (num)&0xFF)
}

// gen2octets renders the lowest two octets of num as dotted decimal (e.g. "1.2").
func gen2octets(num uint32) string {
	return fmt.Sprintf("%d.%d", (num>>8)&0xFF, (num)&0xFF)
}
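
// Illustrative example (not part of the original test code): how a tunnel ID
// maps to addresses via the helpers above, assuming tunID = 258 (0x0102) in
// runGRPCStressCreate:
//
//	gen2octets(258) == "1.2"   -> localIP "100.1.2.1", remoteIP "100.1.2.254"
//	gen3octets(258) == "0.1.2" -> route DstNetwork "10.0.1.2/32"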