github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/benchmark/client/main.go

/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/*
Package main provides a client used for benchmarking. Before running the
client, the user needs to launch the gRPC server.

To start the server before running the client, look for the command in the
following file:

	benchmark/server/main.go

After starting the server, the client can be run. An example of how to run
this command is:

	go run benchmark/client/main.go -test_name=grpc_test

If the server is running on a port other than 50051, use the port flag so the
client hits the server on the correct port. An example of how to run this
command on a different port:

	go run benchmark/client/main.go -test_name=grpc_test -port=8080
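
Other flags tune the benchmark's concurrency and payload. As an illustrative
example (the flag values here are arbitrary), the following runs the streaming
benchmark over 4 connections with 10 concurrent RPCs on each:

	go run benchmark/client/main.go -test_name=grpc_test -rpc_type=streaming -c=4 -r=10

The client writes CPU and memory profiles to /tmp/<test_name>.cpu and
/tmp/<test_name>.mem; these can be examined with go tool pprof.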
*/
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"

	grpc "github.com/hxx258456/ccgo/grpc"
	"github.com/hxx258456/ccgo/grpc/benchmark"
	"github.com/hxx258456/ccgo/grpc/benchmark/stats"
	"github.com/hxx258456/ccgo/grpc/grpclog"
	"github.com/hxx258456/ccgo/grpc/internal/syscall"

	testgrpc "github.com/hxx258456/ccgo/grpc/interop/grpc_testing"
	testpb "github.com/hxx258456/ccgo/grpc/interop/grpc_testing"
)

var (
	port      = flag.String("port", "50051", "Localhost port to connect to.")
	numRPC    = flag.Int("r", 1, "The number of concurrent RPCs on each connection.")
	numConn   = flag.Int("c", 1, "The number of parallel connections.")
	warmupDur = flag.Int("w", 10, "Warm-up duration in seconds.")
	duration  = flag.Int("d", 60, "Benchmark duration in seconds.")
	rqSize    = flag.Int("req", 1, "Request message size in bytes.")
	rspSize   = flag.Int("resp", 1, "Response message size in bytes.")
	rpcType   = flag.String("rpc_type", "unary",
		`Configure the client RPC type. Valid options are:
		   unary;
		   streaming.`)
	testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
	wg       sync.WaitGroup
	// hopts holds the histogram configuration used for latency measurements.
	hopts = stats.HistogramOptions{
		NumBuckets:   2495,
		GrowthFactor: .01,
	}
	mu    sync.Mutex
	hists []*stats.Histogram

	logger = grpclog.Component("benchmark")
)

func main() {
	flag.Parse()
	if *testName == "" {
		logger.Fatalf("test_name not set")
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(*rspSize),
		Payload: &testpb.Payload{
			Type: testpb.PayloadType_COMPRESSABLE,
			Body: make([]byte, *rqSize),
		},
	}
	connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer connectCancel()
	ccs := buildConnections(connectCtx)
	warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second)
	endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
	cf, err := os.Create("/tmp/" + *testName + ".cpu")
	if err != nil {
		logger.Fatalf("Error creating file: %v", err)
	}
	defer cf.Close()
	if err := pprof.StartCPUProfile(cf); err != nil {
		logger.Fatalf("Error starting CPU profile: %v", err)
	}
	cpuBeg := syscall.GetCPUTime()
	for _, cc := range ccs {
		runWithConn(cc, req, warmDeadline, endDeadline)
	}
	wg.Wait()
	cpu := time.Duration(syscall.GetCPUTime() - cpuBeg)
	pprof.StopCPUProfile()
	mf, err := os.Create("/tmp/" + *testName + ".mem")
	if err != nil {
		logger.Fatalf("Error creating file: %v", err)
	}
	defer mf.Close()
	runtime.GC() // materialize all statistics
	if err := pprof.WriteHeapProfile(mf); err != nil {
		logger.Fatalf("Error writing memory profile: %v", err)
	}
	hist := stats.NewHistogram(hopts)
	for _, h := range hists {
		hist.Merge(h)
	}
	parseHist(hist)
	fmt.Println("Client CPU utilization:", cpu)
	fmt.Println("Client CPU profile:", cf.Name())
	fmt.Println("Client Mem Profile:", mf.Name())
}

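// buildConnections dials *numConn insecure connections to the local benchmark
// server, blocking until each one is established or ctx expires.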
func buildConnections(ctx context.Context) []*grpc.ClientConn {
	ccs := make([]*grpc.ClientConn, *numConn)
	for i := range ccs {
		ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock())
	}
	return ccs
}

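// runWithConn starts *numRPC goroutines on cc, each issuing RPCs in a tight
// loop until endDeadline; latencies are recorded only after warmDeadline so
// that the warm-up period is excluded from the results.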
func runWithConn(cc *grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) {
	for i := 0; i < *numRPC; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			caller := makeCaller(cc, req)
			hist := stats.NewHistogram(hopts)
			for {
				start := time.Now()
				if start.After(endDeadline) {
					// Benchmark over: publish this goroutine's histogram.
					mu.Lock()
					hists = append(hists, hist)
					mu.Unlock()
					return
				}
				caller()
				elapsed := time.Since(start)
				if start.After(warmDeadline) {
					hist.Add(elapsed.Nanoseconds())
				}
			}
		}()
	}
}

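// makeCaller returns a closure that performs a single round trip. For unary
// it issues a fresh UnaryCall per invocation; for streaming it creates one
// long-lived stream up front, and each invocation sends one request and reads
// one response on it.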
func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
	client := testgrpc.NewBenchmarkServiceClient(cc)
	if *rpcType == "unary" {
		return func() {
			if _, err := client.UnaryCall(context.Background(), req); err != nil {
				logger.Fatalf("RPC failed: %v", err)
			}
		}
	}
	// Any rpc_type other than "unary" takes the streaming path.
	stream, err := client.StreamingCall(context.Background())
	if err != nil {
		logger.Fatalf("RPC failed: %v", err)
	}
	return func() {
		if err := stream.Send(req); err != nil {
			logger.Fatalf("Streaming RPC failed to send: %v", err)
		}
		if _, err := stream.Recv(); err != nil {
			logger.Fatalf("Streaming RPC failed to read: %v", err)
		}
	}
}

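// parseHist prints the overall QPS and the 50th/90th/99th percentile
// latencies from the merged histogram.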
func parseHist(hist *stats.Histogram) {
	fmt.Println("qps:", float64(hist.Count)/float64(*duration))
	fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n",
		time.Duration(median(.5, hist)),
		time.Duration(median(.9, hist)),
		time.Duration(median(.99, hist)))
}

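// median returns the latency at the given percentile (despite its name it
// handles any percentile, not just the 50th). It walks the histogram buckets
// until the cumulative count reaches the target, then interpolates linearly
// within the bucket that contains it.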
func median(percentile float64, h *stats.Histogram) int64 {
	need := int64(float64(h.Count) * percentile)
	have := int64(0)
	for _, bucket := range h.Buckets {
		count := bucket.Count
		if have+count >= need {
			// Interpolate within this bucket; the next bucket's lower bound
			// is LowBound*(1+GrowthFactor) because buckets grow geometrically.
			percent := float64(need-have) / float64(count)
			return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor))
		}
		have += count
	}
	panic("should have found a bound")
}