gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/benchmark/worker/benchmark_client.go

/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"context"
	"flag"
	"math"
	"runtime"
	"sync"
	"time"

	grpc "gitee.com/ks-custle/core-gm/grpc"
	"gitee.com/ks-custle/core-gm/grpc/benchmark"
	"gitee.com/ks-custle/core-gm/grpc/benchmark/stats"
	"gitee.com/ks-custle/core-gm/grpc/codes"
	"gitee.com/ks-custle/core-gm/grpc/credentials"
	"gitee.com/ks-custle/core-gm/grpc/internal/syscall"
	"gitee.com/ks-custle/core-gm/grpc/status"
	"gitee.com/ks-custle/core-gm/grpc/testdata"

	testgrpc "gitee.com/ks-custle/core-gm/grpc/interop/grpc_testing"
	testpb "gitee.com/ks-custle/core-gm/grpc/interop/grpc_testing"
)

var caFile = flag.String("ca_file", "", "The file containing the CA root cert file")

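// lockingHistogram wraps a stats.Histogram with a mutex so that concurrent
// benchmark goroutines can record latencies into it safely.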
type lockingHistogram struct {
	mu        sync.Mutex
	histogram *stats.Histogram
}

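// add records a single latency value in the histogram.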
func (h *lockingHistogram) add(value int64) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.histogram.Add(value)
}

// swap sets h.histogram to o and returns its old value.
func (h *lockingHistogram) swap(o *stats.Histogram) *stats.Histogram {
	h.mu.Lock()
	defer h.mu.Unlock()
	old := h.histogram
	h.histogram = o
	return old
}

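// mergeInto merges h's histogram into merged while holding the lock.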
func (h *lockingHistogram) mergeInto(merged *stats.Histogram) {
	h.mu.Lock()
	defer h.mu.Unlock()
	merged.Merge(h.histogram)
}

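// benchmarkClient holds the state of a running benchmark client: the
// per-goroutine latency histograms, the wall-clock and rusage baselines used
// by getStats, and the hooks needed to stop the benchmark and close its
// connections.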
type benchmarkClient struct {
	closeConns        func()
	stop              chan bool
	lastResetTime     time.Time
	histogramOptions  stats.HistogramOptions
	lockingHistograms []lockingHistogram
	rusageLastReset   *syscall.Rusage
}

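// printClientConfig logs the relevant fields of the client config, noting
// which options are ignored by the Go worker.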
func printClientConfig(config *testpb.ClientConfig) {
	// Some config options are ignored:
	// - client type:
	//     will always create sync client
	// - async client threads.
	// - core list
	logger.Infof(" * client type: %v (ignored, always creates sync client)", config.ClientType)
	logger.Infof(" * async client threads: %v (ignored)", config.AsyncClientThreads)
	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
	logger.Infof(" * core list: %v (ignored)", config.CoreList)

	logger.Infof(" - security params: %v", config.SecurityParams)
	logger.Infof(" - core limit: %v", config.CoreLimit)
	logger.Infof(" - payload config: %v", config.PayloadConfig)
	logger.Infof(" - rpcs per channel: %v", config.OutstandingRpcsPerChannel)
	logger.Infof(" - channel number: %v", config.ClientChannels)
	logger.Infof(" - load params: %v", config.LoadParams)
	logger.Infof(" - rpc type: %v", config.RpcType)
	logger.Infof(" - histogram params: %v", config.HistogramParams)
	logger.Infof(" - server targets: %v", config.ServerTargets)
}

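// setupClientEnv configures the runtime for the benchmark, currently by
// setting GOMAXPROCS from the configured core limit (or to all available
// CPUs when no limit is given).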
func setupClientEnv(config *testpb.ClientConfig) {
	// Use all CPU cores available on the machine by default.
	// TODO: Revisit this for the optimal default setup.
	if config.CoreLimit > 0 {
		runtime.GOMAXPROCS(int(config.CoreLimit))
	} else {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}
}

// createConns creates connections according to the given config.
// It returns the connections and a corresponding function to close them.
// It returns a non-nil error if anything goes wrong.
func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) {
	var opts []grpc.DialOption

	// Sanity check for client type.
	switch config.ClientType {
	case testpb.ClientType_SYNC_CLIENT:
	case testpb.ClientType_ASYNC_CLIENT:
	default:
		return nil, nil, status.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType)
	}

	// Check and set security options.
	if config.SecurityParams != nil {
		if *caFile == "" {
			*caFile = testdata.Path("ca.pem")
		}
		creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride)
		if err != nil {
			return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials: %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	// Use byteBufCodec if it is required.
	if config.PayloadConfig != nil {
		switch config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			opts = append(opts, grpc.WithDefaultCallOptions(grpc.CallCustomCodec(byteBufCodec{})))
		case *testpb.PayloadConfig_SimpleParams:
		default:
			return nil, nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	}

	// Create connections, spreading them round-robin across the server targets.
	connCount := int(config.ClientChannels)
	conns := make([]*grpc.ClientConn, connCount)
	for connIndex := 0; connIndex < connCount; connIndex++ {
		conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...)
	}

	return conns, func() {
		for _, conn := range conns {
			conn.Close()
		}
	}, nil
}

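// performRPCs reads the payload and load parameters from the config and
// starts the closed-loop unary or streaming RPCs on the given connections.
// Open-loop (Poisson) load is not yet supported.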
func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
	// Read payload size and type from config.
	var (
		payloadReqSize, payloadRespSize int
		payloadType                     string
	)
	if config.PayloadConfig != nil {
		switch c := config.PayloadConfig.Payload.(type) {
		case *testpb.PayloadConfig_BytebufParams:
			payloadReqSize = int(c.BytebufParams.ReqSize)
			payloadRespSize = int(c.BytebufParams.RespSize)
			payloadType = "bytebuf"
		case *testpb.PayloadConfig_SimpleParams:
			payloadReqSize = int(c.SimpleParams.ReqSize)
			payloadRespSize = int(c.SimpleParams.RespSize)
			payloadType = "protobuf"
		default:
			return status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
		}
	}

	// TODO: add open loop distribution.
	switch config.LoadParams.Load.(type) {
	case *testpb.LoadParams_ClosedLoop:
	case *testpb.LoadParams_Poisson:
		return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams)
	default:
		return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams)
	}

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)

	switch config.RpcType {
	case testpb.RpcType_UNARY:
		bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize)
		// TODO: open loop.
	case testpb.RpcType_STREAMING:
		bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType)
		// TODO: open loop.
	default:
		return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
	}

	return nil
}

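// startBenchmarkClient creates a benchmark client from the given config: it
// sets up the environment, dials the server targets, and starts the benchmark
// RPCs. The returned client keeps running until shutdown is called.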
func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
	printClientConfig(config)

	// Set up the running environment, e.g. how many cores to use.
	setupClientEnv(config)

	conns, closeConns, err := createConns(config)
	if err != nil {
		return nil, err
	}

	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
	bc := &benchmarkClient{
		histogramOptions: stats.HistogramOptions{
			NumBuckets:     int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
			GrowthFactor:   config.HistogramParams.Resolution,
			BaseBucketSize: (1 + config.HistogramParams.Resolution),
			MinValue:       0,
		},
		lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns)),

		stop:            make(chan bool),
		lastResetTime:   time.Now(),
		closeConns:      closeConns,
		rusageLastReset: syscall.GetRusage(),
	}

	if err = performRPCs(config, conns, bc); err != nil {
		// Close all connections if performRPCs failed.
		closeConns()
		return nil, err
	}

	return bc, nil
}

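// doCloseLoopUnary starts rpcCountPerConn goroutines per connection, each
// issuing unary calls back to back and recording the latency of every call
// in its own histogram until bc.stop is closed.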
func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
	for ic, conn := range conns {
		client := testgrpc.NewBenchmarkServiceClient(conn)
		// For each connection, create rpcCountPerConn goroutines to do rpc.
		for j := 0; j < rpcCountPerConn; j++ {
			// Create a histogram for each goroutine.
			idx := ic*rpcCountPerConn + j
			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
			// Start a goroutine that records its latencies into the histogram created above.
			go func(idx int) {
				// TODO: do warm up if necessary.
				// For now, rely on the worker client to reserve time for warm-up:
				// it waits for some time after the client is created before
				// starting the benchmark.
				done := make(chan bool)
				for {
					go func() {
						start := time.Now()
						if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
							select {
							case <-bc.stop:
							case done <- false:
							}
							return
						}
						elapse := time.Since(start)
						bc.lockingHistograms[idx].add(int64(elapse))
						select {
						case <-bc.stop:
						case done <- true:
						}
					}()
					select {
					case <-bc.stop:
						return
					case <-done:
					}
				}
			}(idx)
		}
	}
}

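// doCloseLoopStreaming opens rpcCountPerConn streams per connection and, for
// each stream, starts a goroutine that performs streaming round trips back to
// back, recording latencies until bc.stop is closed or the stream fails.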
func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) {
	var doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error
	if payloadType == "bytebuf" {
		doRPC = benchmark.DoByteBufStreamingRoundTrip
	} else {
		doRPC = benchmark.DoStreamingRoundTrip
	}
	for ic, conn := range conns {
		// For each connection, create rpcCountPerConn goroutines to do rpc.
		for j := 0; j < rpcCountPerConn; j++ {
			c := testgrpc.NewBenchmarkServiceClient(conn)
			stream, err := c.StreamingCall(context.Background())
			if err != nil {
				logger.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
			}
			// Create a histogram for each goroutine.
			idx := ic*rpcCountPerConn + j
			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
			// Start a goroutine that records its latencies into the histogram created above.
			go func(idx int) {
				// TODO: do warm up if necessary.
				// For now, rely on the worker client to reserve time for warm-up:
				// it waits for some time after the client is created before
				// starting the benchmark.
				for {
					start := time.Now()
					if err := doRPC(stream, reqSize, respSize); err != nil {
						return
					}
					elapse := time.Since(start)
					bc.lockingHistograms[idx].add(int64(elapse))
					select {
					case <-bc.stop:
						return
					default:
					}
				}
			}(idx)
		}
	}
}

// getStats returns the stats for the benchmark client.
// It resets lastResetTime and all histograms if the reset argument is true.
func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats {
	var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64
	mergedHistogram := stats.NewHistogram(bc.histogramOptions)

	if reset {
		// Merging histograms may take some time.
		// Swap all histograms out first and merge them afterwards.
		toMerge := make([]*stats.Histogram, len(bc.lockingHistograms))
		for i := range bc.lockingHistograms {
			toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions))
		}

		for i := 0; i < len(toMerge); i++ {
			mergedHistogram.Merge(toMerge[i])
		}

		wallTimeElapsed = time.Since(bc.lastResetTime).Seconds()
		latestRusage := syscall.GetRusage()
		uTimeElapsed, sTimeElapsed = syscall.CPUTimeDiff(bc.rusageLastReset, latestRusage)

		bc.rusageLastReset = latestRusage
		bc.lastResetTime = time.Now()
	} else {
		// Merge only, do not reset.
		for i := range bc.lockingHistograms {
			bc.lockingHistograms[i].mergeInto(mergedHistogram)
		}

		wallTimeElapsed = time.Since(bc.lastResetTime).Seconds()
		uTimeElapsed, sTimeElapsed = syscall.CPUTimeDiff(bc.rusageLastReset, syscall.GetRusage())
	}

	b := make([]uint32, len(mergedHistogram.Buckets))
	for i, v := range mergedHistogram.Buckets {
		b[i] = uint32(v.Count)
	}
	return &testpb.ClientStats{
		Latencies: &testpb.HistogramData{
			Bucket:       b,
			MinSeen:      float64(mergedHistogram.Min),
			MaxSeen:      float64(mergedHistogram.Max),
			Sum:          float64(mergedHistogram.Sum),
			SumOfSquares: float64(mergedHistogram.SumOfSquares),
			Count:        float64(mergedHistogram.Count),
		},
		TimeElapsed: wallTimeElapsed,
		TimeUser:    uTimeElapsed,
		TimeSystem:  sTimeElapsed,
	}
}

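// shutdown stops the benchmark goroutines and closes all client connections.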
func (bc *benchmarkClient) shutdown() {
	close(bc.stop)
	bc.closeConns()
}