google.golang.org/grpc@v1.72.2/benchmark/benchmain/main.go

     1  /*
     2   *
     3   * Copyright 2017 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  /*
    20  Package main provides benchmarks that are configured via command-line flags.
    21  
    22  An example to run some benchmarks with profiling enabled:
    23  
    24  	go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \
    25  	  -compression=gzip -maxConcurrentCalls=1 -trace=off \
    26  	  -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \
    27  	  -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result
    28  
    29  As a suggestion, when creating a branch, you can run this benchmark and save the result
    30  file with "-resultFile=basePerf"; later, while the work is in progress or once it is
    31  finished, you can run the benchmark again and compare the new result with the base at any time.
    32  
    33  Assume there are two result files, named "basePerf" and "curPerf", created by passing
    34  -resultFile=basePerf and -resultFile=curPerf respectively.
    35  
    36  	To format curPerf, run:
    37  		go run benchmark/benchresult/main.go curPerf
    38  	To observe how the performance changes relative to the base result, run:
    39  		go run benchmark/benchresult/main.go basePerf curPerf
    40  */
    41  package main
    42  
    43  import (
    44  	"context"
    45  	"encoding/gob"
    46  	"flag"
    47  	"fmt"
    48  	"io"
    49  	"log"
    50  	rand "math/rand/v2"
    51  	"net"
    52  	"os"
    53  	"reflect"
    54  	"runtime"
    55  	"runtime/pprof"
    56  	"strconv"
    57  	"strings"
    58  	"sync"
    59  	"sync/atomic"
    60  	"time"
    61  
    62  	"google.golang.org/grpc"
    63  	"google.golang.org/grpc/benchmark"
    64  	"google.golang.org/grpc/benchmark/flags"
    65  	"google.golang.org/grpc/benchmark/latency"
    66  	"google.golang.org/grpc/benchmark/stats"
    67  	"google.golang.org/grpc/credentials/insecure"
    68  	"google.golang.org/grpc/encoding/gzip"
    69  	"google.golang.org/grpc/grpclog"
    70  	"google.golang.org/grpc/internal"
    71  	"google.golang.org/grpc/internal/channelz"
    72  	"google.golang.org/grpc/keepalive"
    73  	"google.golang.org/grpc/mem"
    74  	"google.golang.org/grpc/metadata"
    75  	"google.golang.org/grpc/test/bufconn"
    76  
    77  	testgrpc "google.golang.org/grpc/interop/grpc_testing"
    78  	testpb "google.golang.org/grpc/interop/grpc_testing"
    79  )
    80  
    81  var (
    82  	workloads = flags.StringWithAllowedValues("workloads", workloadsAll,
    83  		fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", ")), allWorkloads)
    84  	traceMode = flags.StringWithAllowedValues("trace", toggleModeOff,
    85  		fmt.Sprintf("Trace mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes)
    86  	preloaderMode = flags.StringWithAllowedValues("preloader", toggleModeOff,
    87  		fmt.Sprintf("Preloader mode - One of: %v, preloader works only in streaming and unconstrained modes and will be ignored in unary mode",
    88  			strings.Join(allToggleModes, ", ")), allToggleModes)
    89  	channelzOn = flags.StringWithAllowedValues("channelz", toggleModeOff,
    90  		fmt.Sprintf("Channelz mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes)
    91  	compressorMode = flags.StringWithAllowedValues("compression", compModeOff,
    92  		fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompModes, ", ")), allCompModes)
    93  	networkMode = flags.StringWithAllowedValues("networkMode", networkModeNone,
    94  		"Network mode includes LAN, WAN, Local and Longhaul", allNetworkModes)
    95  	readLatency           = flags.DurationSlice("latency", defaultReadLatency, "Simulated one-way network latency - may be a comma-separated list")
    96  	readKbps              = flags.IntSlice("kbps", defaultReadKbps, "Simulated network throughput (in kbps) - may be a comma-separated list")
    97  	readMTU               = flags.IntSlice("mtu", defaultReadMTU, "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list")
    98  	maxConcurrentCalls    = flags.IntSlice("maxConcurrentCalls", defaultMaxConcurrentCalls, "Number of concurrent RPCs during benchmarks")
    99  	readReqSizeBytes      = flags.IntSlice("reqSizeBytes", nil, "Request size in bytes - may be a comma-separated list")
   100  	readRespSizeBytes     = flags.IntSlice("respSizeBytes", nil, "Response size in bytes - may be a comma-separated list")
    101  	reqPayloadCurveFiles  = flags.StringSlice("reqPayloadCurveFiles", nil, "Comma-separated list of CSV files describing the shape of a random distribution of request payload sizes")
    102  	respPayloadCurveFiles = flags.StringSlice("respPayloadCurveFiles", nil, "Comma-separated list of CSV files describing the shape of a random distribution of response payload sizes")
   103  	benchTime             = flag.Duration("benchtime", time.Second, "Configures the amount of time to run each benchmark")
   104  	memProfile            = flag.String("memProfile", "", "Enables memory profiling output to the filename provided.")
   105  	memProfileRate        = flag.Int("memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+
   106  		"memProfile should be set before setting profile rate. To include every allocated block in the profile, "+
   107  		"set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.")
   108  	cpuProfile          = flag.String("cpuProfile", "", "Enables CPU profiling output to the filename provided")
   109  	benchmarkResultFile = flag.String("resultFile", "", "Save the benchmark result into a binary file")
   110  	useBufconn          = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")
   111  	enableKeepalive     = flag.Bool("enable_keepalive", false, "Enable client keepalive. \n"+
   112  		"Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.")
   113  	clientReadBufferSize  = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a comma-separated list")
   114  	clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a comma-separated list")
   115  	serverReadBufferSize  = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a comma-separated list")
   116  	serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a comma-separated list")
   117  	sleepBetweenRPCs      = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a comma-separated list")
   118  	connections           = flag.Int("connections", 1, "The number of connections. Each connection will handle maxConcurrentCalls RPC streams")
   119  	recvBufferPool        = flags.StringWithAllowedValues("recvBufferPool", recvBufferPoolNil, "Configures the shared receive buffer pool. One of: nil, simple, all", allRecvBufferPools)
   120  	sharedWriteBuffer     = flags.StringWithAllowedValues("sharedWriteBuffer", toggleModeOff,
   121  		fmt.Sprintf("Configures both client and server to share write buffer - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes)
   122  
   123  	logger = grpclog.Component("benchmark")
   124  )
   125  
   126  const (
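         	// Workload types.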
   127  	workloadsUnary         = "unary"
   128  	workloadsStreaming     = "streaming"
   129  	workloadsUnconstrained = "unconstrained"
   130  	workloadsAll           = "all"
   131  	// Compression modes.
   132  	compModeOff  = "off"
   133  	compModeGzip = "gzip"
   134  	compModeNop  = "nop"
   135  	compModeAll  = "all"
   136  	// Toggle modes.
   137  	toggleModeOff  = "off"
   138  	toggleModeOn   = "on"
   139  	toggleModeBoth = "both"
   140  	// Network modes.
   141  	networkModeNone  = "none"
   142  	networkModeLocal = "Local"
   143  	networkModeLAN   = "LAN"
   144  	networkModeWAN   = "WAN"
   145  	networkLongHaul  = "Longhaul"
   146  	// Shared recv buffer pool
   147  	recvBufferPoolNil    = "nil"
   148  	recvBufferPoolSimple = "simple"
   149  	recvBufferPoolAll    = "all"
   150  
   151  	numStatsBuckets = 10
   152  	warmupCallCount = 10
   153  	warmuptime      = time.Second
   154  )
   155  
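         // useNopBufferPool controls whether swappableBufferPool bypasses buffer
         // reuse; it is set per benchmark run according to the recvBufferPool
         // feature.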
   156  var useNopBufferPool atomic.Bool
   157  
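         // swappableBufferPool wraps a mem.BufferPool and delegates to
         // mem.NopBufferPool whenever useNopBufferPool is set, allowing the
         // benchmark to toggle buffer reuse between runs.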
   158  type swappableBufferPool struct {
   159  	mem.BufferPool
   160  }
   161  
   162  func (p swappableBufferPool) Get(length int) *[]byte {
   163  	var pool mem.BufferPool
   164  	if useNopBufferPool.Load() {
   165  		pool = mem.NopBufferPool{}
   166  	} else {
   167  		pool = p.BufferPool
   168  	}
   169  	return pool.Get(length)
   170  }
   171  
   172  func (p swappableBufferPool) Put(i *[]byte) {
   173  	if useNopBufferPool.Load() {
   174  		return
   175  	}
   176  	p.BufferPool.Put(i)
   177  }
   178  
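         // init installs the swappable pool as the default buffer pool (for
         // testing) so that the recvBufferPool feature can toggle pooling
         // behavior at runtime.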
   179  func init() {
   180  	internal.SetDefaultBufferPoolForTesting.(func(mem.BufferPool))(swappableBufferPool{mem.DefaultBufferPool()})
   181  }
   182  
   183  var (
   184  	allWorkloads              = []string{workloadsUnary, workloadsStreaming, workloadsUnconstrained, workloadsAll}
   185  	allCompModes              = []string{compModeOff, compModeGzip, compModeNop, compModeAll}
   186  	allToggleModes            = []string{toggleModeOff, toggleModeOn, toggleModeBoth}
   187  	allNetworkModes           = []string{networkModeNone, networkModeLocal, networkModeLAN, networkModeWAN, networkLongHaul}
   188  	allRecvBufferPools        = []string{recvBufferPoolNil, recvBufferPoolSimple, recvBufferPoolAll}
   189  	defaultReadLatency        = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay.
   190  	defaultReadKbps           = []int{0, 10240}                           // if non-positive, infinite
   191  	defaultReadMTU            = []int{0}                                  // if non-positive, infinite
   192  	defaultMaxConcurrentCalls = []int{1, 8, 64, 512}
   193  	defaultReqSizeBytes       = []int{1, 1024, 1024 * 1024}
   194  	defaultRespSizeBytes      = []int{1, 1024, 1024 * 1024}
   195  	networks                  = map[string]latency.Network{
   196  		networkModeLocal: latency.Local,
   197  		networkModeLAN:   latency.LAN,
   198  		networkModeWAN:   latency.WAN,
   199  		networkLongHaul:  latency.Longhaul,
   200  	}
   201  	keepaliveTime    = 10 * time.Second
   202  	keepaliveTimeout = 1 * time.Second
   203  	// This is 0.8*keepaliveTime to prevent connection issues because of server
   204  	// keepalive enforcement.
   205  	keepaliveMinTime = 8 * time.Second
   206  )
   207  
   208  // runModes indicates the workloads to run. This is initialized with a call to
   209  // `runModesFromWorkloads`, passing the workloads flag set by the user.
   210  type runModes struct {
   211  	unary, streaming, unconstrained bool
   212  }
   213  
   214  // runModesFromWorkloads determines the runModes based on the value of
   215  // workloads flag set by the user.
   216  func runModesFromWorkloads(workload string) runModes {
   217  	r := runModes{}
   218  	switch workload {
   219  	case workloadsUnary:
   220  		r.unary = true
   221  	case workloadsStreaming:
   222  		r.streaming = true
   223  	case workloadsUnconstrained:
   224  		r.unconstrained = true
   225  	case workloadsAll:
   226  		r.unary = true
   227  		r.streaming = true
   228  		r.unconstrained = true
   229  	default:
   230  		log.Fatalf("Unknown workloads setting: %v (want one of: %v)",
   231  			workloads, strings.Join(allWorkloads, ", "))
   232  	}
   233  	return r
   234  }
   235  
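         // Signatures of the helper functions used to drive the benchmarks; 'cn'
         // indexes the connection and 'pos' indexes the concurrent call on that
         // connection.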
   236  type startFunc func(mode string, bf stats.Features)
   237  type stopFunc func(count uint64)
   238  type ucStopFunc func(req uint64, resp uint64)
   239  type rpcCallFunc func(cn, pos int)
   240  type rpcSendFunc func(cn, pos int)
   241  type rpcRecvFunc func(cn, pos int)
   242  type rpcCleanupFunc func()
   243  
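         // unaryBenchmark runs the unary workload for the given feature set,
         // recording per-call latencies in 's'.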
   244  func unaryBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats) {
   245  	caller, cleanup := makeFuncUnary(bf)
   246  	defer cleanup()
   247  	runBenchmark(caller, start, stop, bf, s, workloadsUnary)
   248  }
   249  
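         // streamBenchmark runs the streaming round-trip workload for the given
         // feature set, recording per-call latencies in 's'.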
   250  func streamBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats) {
   251  	caller, cleanup := makeFuncStream(bf)
   252  	defer cleanup()
   253  	runBenchmark(caller, start, stop, bf, s, workloadsStreaming)
   254  }
   255  
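         // unconstrainedStreamBenchmark drives sends and receives on separate
         // goroutines for each stream, resets the request/response counters once
         // the warmup period has elapsed, and reports the totals through 'stop'.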
   256  func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features) {
   257  	var sender rpcSendFunc
   258  	var recver rpcRecvFunc
   259  	var cleanup rpcCleanupFunc
   260  	if bf.EnablePreloader {
   261  		sender, recver, cleanup = makeFuncUnconstrainedStreamPreloaded(bf)
   262  	} else {
   263  		sender, recver, cleanup = makeFuncUnconstrainedStream(bf)
   264  	}
   265  	defer cleanup()
   266  
   267  	var req, resp uint64
   268  	go func() {
   269  		// Resets the counters once warmed up
   270  		<-time.NewTimer(warmuptime).C
   271  		atomic.StoreUint64(&req, 0)
   272  		atomic.StoreUint64(&resp, 0)
   273  		start(workloadsUnconstrained, bf)
   274  	}()
   275  
   276  	bmEnd := time.Now().Add(bf.BenchTime + warmuptime)
   277  	var wg sync.WaitGroup
   278  	wg.Add(2 * bf.Connections * bf.MaxConcurrentCalls)
   279  	maxSleep := int(bf.SleepBetweenRPCs)
   280  	for cn := 0; cn < bf.Connections; cn++ {
   281  		for pos := 0; pos < bf.MaxConcurrentCalls; pos++ {
   282  			go func(cn, pos int) {
   283  				defer wg.Done()
   284  				for {
   285  					if maxSleep > 0 {
   286  						time.Sleep(time.Duration(rand.IntN(maxSleep)))
   287  					}
   288  					t := time.Now()
   289  					if t.After(bmEnd) {
   290  						return
   291  					}
   292  					sender(cn, pos)
   293  					atomic.AddUint64(&req, 1)
   294  				}
   295  			}(cn, pos)
   296  			go func(cn, pos int) {
   297  				defer wg.Done()
   298  				for {
   299  					t := time.Now()
   300  					if t.After(bmEnd) {
   301  						return
   302  					}
   303  					recver(cn, pos)
   304  					atomic.AddUint64(&resp, 1)
   305  				}
   306  			}(cn, pos)
   307  		}
   308  	}
   309  	wg.Wait()
   310  	stop(req, resp)
   311  }
   312  
    313  // makeClients starts a benchmark server and returns one client per connection
    314  // for the grpc.testing.BenchmarkService service, configured using the options
    315  // in the passed 'bf'. It also returns a cleanup function that closes all client
    316  // connections and stops the server.
   317  func makeClients(bf stats.Features) ([]testgrpc.BenchmarkServiceClient, func()) {
   318  	nw := &latency.Network{Kbps: bf.Kbps, Latency: bf.Latency, MTU: bf.MTU}
   319  	opts := []grpc.DialOption{}
   320  	sopts := []grpc.ServerOption{}
   321  	if bf.ModeCompressor == compModeNop {
   322  		sopts = append(sopts,
   323  			grpc.RPCCompressor(nopCompressor{}),
   324  			grpc.RPCDecompressor(nopDecompressor{}),
   325  		)
   326  		opts = append(opts,
   327  			grpc.WithCompressor(nopCompressor{}),
   328  			grpc.WithDecompressor(nopDecompressor{}),
   329  		)
   330  	}
   331  	if bf.ModeCompressor == compModeGzip {
   332  		opts = append(opts,
   333  			grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
   334  		)
   335  	}
   336  	if bf.EnableKeepalive {
   337  		sopts = append(sopts,
   338  			grpc.KeepaliveParams(keepalive.ServerParameters{
   339  				Time:    keepaliveTime,
   340  				Timeout: keepaliveTimeout,
   341  			}),
   342  			grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
   343  				MinTime:             keepaliveMinTime,
   344  				PermitWithoutStream: true,
   345  			}),
   346  		)
   347  		opts = append(opts,
   348  			grpc.WithKeepaliveParams(keepalive.ClientParameters{
   349  				Time:                keepaliveTime,
   350  				Timeout:             keepaliveTimeout,
   351  				PermitWithoutStream: true,
   352  			}),
   353  		)
   354  	}
   355  	if bf.ClientReadBufferSize >= 0 {
   356  		opts = append(opts, grpc.WithReadBufferSize(bf.ClientReadBufferSize))
   357  	}
   358  	if bf.ClientWriteBufferSize >= 0 {
   359  		opts = append(opts, grpc.WithWriteBufferSize(bf.ClientWriteBufferSize))
   360  	}
   361  	if bf.ServerReadBufferSize >= 0 {
   362  		sopts = append(sopts, grpc.ReadBufferSize(bf.ServerReadBufferSize))
   363  	}
   364  	if bf.SharedWriteBuffer {
   365  		opts = append(opts, grpc.WithSharedWriteBuffer(true))
   366  		sopts = append(sopts, grpc.SharedWriteBuffer(true))
   367  	}
   368  	if bf.ServerWriteBufferSize >= 0 {
   369  		sopts = append(sopts, grpc.WriteBufferSize(bf.ServerWriteBufferSize))
   370  	}
   371  	switch bf.RecvBufferPool {
   372  	case recvBufferPoolNil:
   373  		useNopBufferPool.Store(true)
    374  	case recvBufferPoolSimple:
    375  		useNopBufferPool.Store(false) // pooling is the default; reset in case a previous run enabled the nop pool
   376  	default:
   377  		logger.Fatalf("Unknown shared recv buffer pool type: %v", bf.RecvBufferPool)
   378  	}
   379  
   380  	sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(bf.MaxConcurrentCalls+1)))
   381  	opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
   382  
   383  	var lis net.Listener
   384  	if bf.UseBufConn {
   385  		bcLis := bufconn.Listen(256 * 1024)
   386  		lis = bcLis
   387  		opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
   388  			return nw.ContextDialer(func(context.Context, string, string) (net.Conn, error) {
   389  				return bcLis.Dial()
   390  			})(ctx, "", "")
   391  		}))
   392  	} else {
   393  		var err error
   394  		lis, err = net.Listen("tcp", "localhost:0")
   395  		if err != nil {
   396  			logger.Fatalf("Failed to listen: %v", err)
   397  		}
   398  		opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
   399  			return nw.ContextDialer((internal.NetDialerWithTCPKeepalive().DialContext))(ctx, "tcp", lis.Addr().String())
   400  		}))
   401  	}
   402  	lis = nw.Listener(lis)
   403  	stopper := benchmark.StartServer(benchmark.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
   404  	conns := make([]*grpc.ClientConn, bf.Connections)
   405  	clients := make([]testgrpc.BenchmarkServiceClient, bf.Connections)
   406  	for cn := 0; cn < bf.Connections; cn++ {
   407  		conns[cn] = benchmark.NewClientConn("" /* target not used */, opts...)
   408  		clients[cn] = testgrpc.NewBenchmarkServiceClient(conns[cn])
   409  	}
   410  
   411  	return clients, func() {
   412  		for _, conn := range conns {
   413  			conn.Close()
   414  		}
   415  		stopper()
   416  	}
   417  }
   418  
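         // makeFuncUnary returns a caller that issues a single unary RPC on the
         // given connection, choosing payload sizes from the configured payload
         // curves when present.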
   419  func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) {
   420  	clients, cleanup := makeClients(bf)
   421  	return func(cn, _ int) {
   422  		reqSizeBytes := bf.ReqSizeBytes
   423  		respSizeBytes := bf.RespSizeBytes
   424  		if bf.ReqPayloadCurve != nil {
   425  			reqSizeBytes = bf.ReqPayloadCurve.ChooseRandom()
   426  		}
   427  		if bf.RespPayloadCurve != nil {
   428  			respSizeBytes = bf.RespPayloadCurve.ChooseRandom()
   429  		}
   430  		unaryCaller(clients[cn], reqSizeBytes, respSizeBytes)
   431  	}, cleanup
   432  }
   433  
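         // makeFuncStream returns a caller that performs one request/response
         // exchange on a pre-established stream, using pre-encoded messages when
         // the preloader is enabled.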
   434  func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) {
   435  	streams, req, cleanup := setupStream(bf, false)
   436  
   437  	var preparedMsg [][]*grpc.PreparedMsg
   438  	if bf.EnablePreloader {
   439  		preparedMsg = prepareMessages(streams, req)
   440  	}
   441  
   442  	return func(cn, pos int) {
   443  		reqSizeBytes := bf.ReqSizeBytes
   444  		respSizeBytes := bf.RespSizeBytes
   445  		if bf.ReqPayloadCurve != nil {
   446  			reqSizeBytes = bf.ReqPayloadCurve.ChooseRandom()
   447  		}
   448  		if bf.RespPayloadCurve != nil {
   449  			respSizeBytes = bf.RespPayloadCurve.ChooseRandom()
   450  		}
   451  		var req any
   452  		if bf.EnablePreloader {
   453  			req = preparedMsg[cn][pos]
   454  		} else {
   455  			pl := benchmark.NewPayload(testpb.PayloadType_COMPRESSABLE, reqSizeBytes)
   456  			req = &testpb.SimpleRequest{
   457  				ResponseType: pl.Type,
   458  				ResponseSize: int32(respSizeBytes),
   459  				Payload:      pl,
   460  			}
   461  		}
   462  		streamCaller(streams[cn][pos], req)
   463  	}, cleanup
   464  }
   465  
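         // makeFuncUnconstrainedStreamPreloaded is like makeFuncUnconstrainedStream,
         // but sends pre-encoded messages to take marshaling off the send path.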
   466  func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) {
   467  	streams, req, cleanup := setupStream(bf, true)
   468  
   469  	preparedMsg := prepareMessages(streams, req)
   470  
   471  	return func(cn, pos int) {
   472  			streams[cn][pos].SendMsg(preparedMsg[cn][pos])
   473  		}, func(cn, pos int) {
   474  			streams[cn][pos].Recv()
   475  		}, cleanup
   476  }
   477  
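         // makeFuncUnconstrainedStream returns independent send and receive
         // functions for the unconstrained streaming workload.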
   478  func makeFuncUnconstrainedStream(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) {
   479  	streams, req, cleanup := setupStream(bf, true)
   480  
   481  	return func(cn, pos int) {
   482  			streams[cn][pos].Send(req)
   483  		}, func(cn, pos int) {
   484  			streams[cn][pos].Recv()
   485  		}, cleanup
   486  }
   487  
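         // setupStream opens MaxConcurrentCalls streaming RPCs on every connection
         // and builds the request message used on those streams. Unconstrained and
         // preloader modes are signaled to the server through outgoing metadata.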
   488  func setupStream(bf stats.Features, unconstrained bool) ([][]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) {
   489  	clients, cleanup := makeClients(bf)
   490  
   491  	streams := make([][]testgrpc.BenchmarkService_StreamingCallClient, bf.Connections)
   492  	ctx := context.Background()
   493  	if unconstrained {
   494  		md := metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1", benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String())
   495  		ctx = metadata.NewOutgoingContext(ctx, md)
   496  	}
   497  	if bf.EnablePreloader {
   498  		md := metadata.Pairs(benchmark.PreloadMsgSizeHeader, strconv.Itoa(bf.RespSizeBytes), benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String())
   499  		ctx = metadata.NewOutgoingContext(ctx, md)
   500  	}
   501  	for cn := 0; cn < bf.Connections; cn++ {
   502  		tc := clients[cn]
   503  		streams[cn] = make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls)
   504  		for pos := 0; pos < bf.MaxConcurrentCalls; pos++ {
   505  			stream, err := tc.StreamingCall(ctx)
   506  			if err != nil {
   507  				logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
   508  			}
   509  			streams[cn][pos] = stream
   510  		}
   511  	}
   512  
   513  	pl := benchmark.NewPayload(testpb.PayloadType_COMPRESSABLE, bf.ReqSizeBytes)
   514  	req := &testpb.SimpleRequest{
   515  		ResponseType: pl.Type,
   516  		ResponseSize: int32(bf.RespSizeBytes),
   517  		Payload:      pl,
   518  	}
   519  
   520  	return streams, req, cleanup
   521  }
   522  
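         // prepareMessages pre-encodes the request once per stream so that senders
         // can skip per-message marshaling.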
   523  func prepareMessages(streams [][]testgrpc.BenchmarkService_StreamingCallClient, req *testpb.SimpleRequest) [][]*grpc.PreparedMsg {
   524  	preparedMsg := make([][]*grpc.PreparedMsg, len(streams))
   525  	for cn, connStreams := range streams {
   526  		preparedMsg[cn] = make([]*grpc.PreparedMsg, len(connStreams))
   527  		for pos, stream := range connStreams {
   528  			preparedMsg[cn][pos] = &grpc.PreparedMsg{}
   529  			if err := preparedMsg[cn][pos].Encode(stream, req); err != nil {
   530  				logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[cn][pos], req, stream, err)
   531  			}
   532  		}
   533  	}
   534  	return preparedMsg
   535  }
   536  
    537  // unaryCaller makes a UnaryCall gRPC request using the given
    538  // BenchmarkServiceClient and the given request and response sizes.
   539  func unaryCaller(client testgrpc.BenchmarkServiceClient, reqSize, respSize int) {
   540  	if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
   541  		logger.Fatalf("DoUnaryCall failed: %v", err)
   542  	}
   543  }
   544  
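         // streamCaller performs a single send/receive round trip on the given
         // stream.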
   545  func streamCaller(stream testgrpc.BenchmarkService_StreamingCallClient, req any) {
   546  	if err := benchmark.DoStreamingRoundTripPreloaded(stream, req); err != nil {
   547  		logger.Fatalf("DoStreamingRoundTrip failed: %v", err)
   548  	}
   549  }
   550  
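         // runBenchmark warms up the connections (unless SleepBetweenRPCs is set),
         // then issues RPCs from MaxConcurrentCalls goroutines per connection until
         // BenchTime elapses, recording the duration of every call in 's'.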
   551  func runBenchmark(caller rpcCallFunc, start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats, mode string) {
    552  	// If SleepBetweenRPCs > 0, skip the warmup: it would send a burst of
    553  	// simultaneous requests on every connection, which is exactly what
    554  	// SleepBetweenRPCs is meant to avoid.
   555  	if bf.SleepBetweenRPCs == 0 {
   556  		// Warm up connections.
   557  		for i := 0; i < warmupCallCount; i++ {
   558  			for cn := 0; cn < bf.Connections; cn++ {
   559  				caller(cn, 0)
   560  			}
   561  		}
   562  	}
   563  
   564  	// Run benchmark.
   565  	start(mode, bf)
   566  	var wg sync.WaitGroup
   567  	wg.Add(bf.Connections * bf.MaxConcurrentCalls)
   568  	bmEnd := time.Now().Add(bf.BenchTime)
   569  	maxSleep := int(bf.SleepBetweenRPCs)
   570  	var count uint64
   571  	for cn := 0; cn < bf.Connections; cn++ {
   572  		for pos := 0; pos < bf.MaxConcurrentCalls; pos++ {
   573  			go func(cn, pos int) {
   574  				defer wg.Done()
   575  				for {
   576  					if maxSleep > 0 {
   577  						time.Sleep(time.Duration(rand.IntN(maxSleep)))
   578  					}
   579  					t := time.Now()
   580  					if t.After(bmEnd) {
   581  						return
   582  					}
   583  					start := time.Now()
   584  					caller(cn, pos)
   585  					elapse := time.Since(start)
   586  					atomic.AddUint64(&count, 1)
   587  					s.AddDuration(elapse)
   588  				}
   589  			}(cn, pos)
   590  		}
   591  	}
   592  	wg.Wait()
   593  	stop(count)
   594  }
   595  
   596  // benchOpts represents all configurable options available while running this
   597  // benchmark. This is built from the values passed as flags.
   598  type benchOpts struct {
   599  	rModes              runModes
   600  	benchTime           time.Duration
   601  	memProfileRate      int
   602  	memProfile          string
   603  	cpuProfile          string
   604  	networkMode         string
   605  	benchmarkResultFile string
   606  	useBufconn          bool
   607  	enableKeepalive     bool
   608  	connections         int
   609  	features            *featureOpts
   610  }
   611  
   612  // featureOpts represents options which can have multiple values. The user
   613  // usually provides a comma-separated list of options for each of these
   614  // features through command line flags. We generate all possible combinations
   615  // for the provided values and run the benchmarks for each combination.
   616  type featureOpts struct {
   617  	enableTrace           []bool
   618  	readLatencies         []time.Duration
   619  	readKbps              []int
   620  	readMTU               []int
   621  	maxConcurrentCalls    []int
   622  	reqSizeBytes          []int
   623  	respSizeBytes         []int
   624  	reqPayloadCurves      []*stats.PayloadCurve
   625  	respPayloadCurves     []*stats.PayloadCurve
   626  	compModes             []string
   627  	enableChannelz        []bool
   628  	enablePreloader       []bool
   629  	clientReadBufferSize  []int
   630  	clientWriteBufferSize []int
   631  	serverReadBufferSize  []int
   632  	serverWriteBufferSize []int
   633  	sleepBetweenRPCs      []time.Duration
   634  	recvBufferPools       []string
   635  	sharedWriteBuffer     []bool
   636  }
   637  
    638  // makeFeaturesNum returns a slice of ints of size 'maxFeatureIndex' where each
    639  // element of the slice (indexed by the 'featureIndex' enum) contains the number
    640  // of values to be exercised by the benchmark code for that feature.
    641  // For example: index 0 of the returned slice contains the number of values for
    642  // the enableTrace feature, while index 1 contains the number of values for the
    643  // readLatencies feature, and so on.
   644  func makeFeaturesNum(b *benchOpts) []int {
   645  	featuresNum := make([]int, stats.MaxFeatureIndex)
   646  	for i := 0; i < len(featuresNum); i++ {
   647  		switch stats.FeatureIndex(i) {
   648  		case stats.EnableTraceIndex:
   649  			featuresNum[i] = len(b.features.enableTrace)
   650  		case stats.ReadLatenciesIndex:
   651  			featuresNum[i] = len(b.features.readLatencies)
   652  		case stats.ReadKbpsIndex:
   653  			featuresNum[i] = len(b.features.readKbps)
   654  		case stats.ReadMTUIndex:
   655  			featuresNum[i] = len(b.features.readMTU)
   656  		case stats.MaxConcurrentCallsIndex:
   657  			featuresNum[i] = len(b.features.maxConcurrentCalls)
   658  		case stats.ReqSizeBytesIndex:
   659  			featuresNum[i] = len(b.features.reqSizeBytes)
   660  		case stats.RespSizeBytesIndex:
   661  			featuresNum[i] = len(b.features.respSizeBytes)
   662  		case stats.ReqPayloadCurveIndex:
   663  			featuresNum[i] = len(b.features.reqPayloadCurves)
   664  		case stats.RespPayloadCurveIndex:
   665  			featuresNum[i] = len(b.features.respPayloadCurves)
   666  		case stats.CompModesIndex:
   667  			featuresNum[i] = len(b.features.compModes)
   668  		case stats.EnableChannelzIndex:
   669  			featuresNum[i] = len(b.features.enableChannelz)
   670  		case stats.EnablePreloaderIndex:
   671  			featuresNum[i] = len(b.features.enablePreloader)
   672  		case stats.ClientReadBufferSize:
   673  			featuresNum[i] = len(b.features.clientReadBufferSize)
   674  		case stats.ClientWriteBufferSize:
   675  			featuresNum[i] = len(b.features.clientWriteBufferSize)
   676  		case stats.ServerReadBufferSize:
   677  			featuresNum[i] = len(b.features.serverReadBufferSize)
   678  		case stats.ServerWriteBufferSize:
   679  			featuresNum[i] = len(b.features.serverWriteBufferSize)
   680  		case stats.SleepBetweenRPCs:
   681  			featuresNum[i] = len(b.features.sleepBetweenRPCs)
   682  		case stats.RecvBufferPool:
   683  			featuresNum[i] = len(b.features.recvBufferPools)
   684  		case stats.SharedWriteBuffer:
   685  			featuresNum[i] = len(b.features.sharedWriteBuffer)
   686  		default:
    687  			log.Fatalf("Unknown feature index %v in makeFeaturesNum. maxFeatureIndex is %v", i, stats.MaxFeatureIndex)
   688  		}
   689  	}
   690  	return featuresNum
   691  }
   692  
    693  // sharedFeatures returns a bool slice which acts as a bitmask. Each item in
    694  // the slice represents a feature, indexed by the 'featureIndex' enum. A bit is
    695  // set if the corresponding feature does not have multiple values, and is
    696  // therefore shared amongst all benchmarks.
   697  func sharedFeatures(featuresNum []int) []bool {
   698  	result := make([]bool, len(featuresNum))
   699  	for i, num := range featuresNum {
   700  		if num <= 1 {
   701  			result[i] = true
   702  		}
   703  	}
   704  	return result
   705  }
   706  
   707  // generateFeatures generates all combinations of the provided feature options.
   708  // While all the feature options are stored in the benchOpts struct, the input
   709  // parameter 'featuresNum' is a slice indexed by 'featureIndex' enum containing
   710  // the number of values for each feature.
    711  // For example, if the user sets -workloads=all and
    712  // -maxConcurrentCalls=1,100, the benchmarks end up running the following
    713  // combinations:
    714  // [workloads: unary, maxConcurrentCalls=1]
    715  // [workloads: unary, maxConcurrentCalls=100]
    716  // [workloads: streaming, maxConcurrentCalls=1]
    717  // [workloads: streaming, maxConcurrentCalls=100]
    718  // [workloads: unconstrained, maxConcurrentCalls=1]
    719  // [workloads: unconstrained, maxConcurrentCalls=100]
   720  func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features {
   721  	// curPos and initialPos are two slices where each value acts as an index
   722  	// into the appropriate feature slice maintained in benchOpts.features. This
   723  	// loop generates all possible combinations of features by changing one value
   724  	// at a time, and once curPos becomes equal to initialPos, we have explored
   725  	// all options.
   726  	var result []stats.Features
   727  	var curPos []int
   728  	initialPos := make([]int, stats.MaxFeatureIndex)
   729  	for !reflect.DeepEqual(initialPos, curPos) {
   730  		if curPos == nil {
   731  			curPos = make([]int, stats.MaxFeatureIndex)
   732  		}
   733  		f := stats.Features{
   734  			// These features stay the same for each iteration.
   735  			NetworkMode:     b.networkMode,
   736  			UseBufConn:      b.useBufconn,
   737  			EnableKeepalive: b.enableKeepalive,
   738  			BenchTime:       b.benchTime,
   739  			Connections:     b.connections,
   740  			// These features can potentially change for each iteration.
   741  			EnableTrace:           b.features.enableTrace[curPos[stats.EnableTraceIndex]],
   742  			Latency:               b.features.readLatencies[curPos[stats.ReadLatenciesIndex]],
   743  			Kbps:                  b.features.readKbps[curPos[stats.ReadKbpsIndex]],
   744  			MTU:                   b.features.readMTU[curPos[stats.ReadMTUIndex]],
   745  			MaxConcurrentCalls:    b.features.maxConcurrentCalls[curPos[stats.MaxConcurrentCallsIndex]],
   746  			ModeCompressor:        b.features.compModes[curPos[stats.CompModesIndex]],
   747  			EnableChannelz:        b.features.enableChannelz[curPos[stats.EnableChannelzIndex]],
   748  			EnablePreloader:       b.features.enablePreloader[curPos[stats.EnablePreloaderIndex]],
   749  			ClientReadBufferSize:  b.features.clientReadBufferSize[curPos[stats.ClientReadBufferSize]],
   750  			ClientWriteBufferSize: b.features.clientWriteBufferSize[curPos[stats.ClientWriteBufferSize]],
   751  			ServerReadBufferSize:  b.features.serverReadBufferSize[curPos[stats.ServerReadBufferSize]],
   752  			ServerWriteBufferSize: b.features.serverWriteBufferSize[curPos[stats.ServerWriteBufferSize]],
   753  			SleepBetweenRPCs:      b.features.sleepBetweenRPCs[curPos[stats.SleepBetweenRPCs]],
   754  			RecvBufferPool:        b.features.recvBufferPools[curPos[stats.RecvBufferPool]],
   755  			SharedWriteBuffer:     b.features.sharedWriteBuffer[curPos[stats.SharedWriteBuffer]],
   756  		}
   757  		if len(b.features.reqPayloadCurves) == 0 {
   758  			f.ReqSizeBytes = b.features.reqSizeBytes[curPos[stats.ReqSizeBytesIndex]]
   759  		} else {
   760  			f.ReqPayloadCurve = b.features.reqPayloadCurves[curPos[stats.ReqPayloadCurveIndex]]
   761  		}
   762  		if len(b.features.respPayloadCurves) == 0 {
   763  			f.RespSizeBytes = b.features.respSizeBytes[curPos[stats.RespSizeBytesIndex]]
   764  		} else {
   765  			f.RespPayloadCurve = b.features.respPayloadCurves[curPos[stats.RespPayloadCurveIndex]]
   766  		}
   767  		result = append(result, f)
   768  		addOne(curPos, featuresNum)
   769  	}
   770  	return result
   771  }
   772  
    773  // addOne mutates the input slice 'features' to arrive at the next combination
    774  // of feature values, carrying over to earlier positions when a feature wraps
    775  // around. 'featuresMaxPosition' provides the number of allowed values for each
    776  // feature, indexed by the 'featureIndex' enum.
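         // For example, with a hypothetical featuresMaxPosition of []int{2, 3},
         // successive calls advance the slice [0 0] -> [0 1] -> [0 2] -> [1 0] ->
         // [1 1] -> [1 2] and back to [0 0].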
   777  func addOne(features []int, featuresMaxPosition []int) {
   778  	for i := len(features) - 1; i >= 0; i-- {
   779  		if featuresMaxPosition[i] == 0 {
   780  			continue
   781  		}
   782  		features[i] = (features[i] + 1)
   783  		if features[i]/featuresMaxPosition[i] == 0 {
   784  			break
   785  		}
   786  		features[i] = features[i] % featuresMaxPosition[i]
   787  	}
   788  }
   789  
   790  // processFlags reads the command line flags and builds benchOpts. Specifying
   791  // invalid values for certain flags will cause flag.Parse() to fail, and the
   792  // program to terminate.
   793  // This *SHOULD* be the only place where the flags are accessed. All other
   794  // parts of the benchmark code should rely on the returned benchOpts.
   795  func processFlags() *benchOpts {
   796  	flag.Parse()
   797  	if flag.NArg() != 0 {
   798  		log.Fatal("Error: unparsed arguments: ", flag.Args())
   799  	}
   800  
   801  	opts := &benchOpts{
   802  		rModes:              runModesFromWorkloads(*workloads),
   803  		benchTime:           *benchTime,
   804  		memProfileRate:      *memProfileRate,
   805  		memProfile:          *memProfile,
   806  		cpuProfile:          *cpuProfile,
   807  		networkMode:         *networkMode,
   808  		benchmarkResultFile: *benchmarkResultFile,
   809  		useBufconn:          *useBufconn,
   810  		enableKeepalive:     *enableKeepalive,
   811  		connections:         *connections,
   812  		features: &featureOpts{
   813  			enableTrace:           setToggleMode(*traceMode),
   814  			readLatencies:         append([]time.Duration(nil), *readLatency...),
   815  			readKbps:              append([]int(nil), *readKbps...),
   816  			readMTU:               append([]int(nil), *readMTU...),
   817  			maxConcurrentCalls:    append([]int(nil), *maxConcurrentCalls...),
   818  			reqSizeBytes:          append([]int(nil), *readReqSizeBytes...),
   819  			respSizeBytes:         append([]int(nil), *readRespSizeBytes...),
   820  			compModes:             setCompressorMode(*compressorMode),
   821  			enableChannelz:        setToggleMode(*channelzOn),
   822  			enablePreloader:       setToggleMode(*preloaderMode),
   823  			clientReadBufferSize:  append([]int(nil), *clientReadBufferSize...),
   824  			clientWriteBufferSize: append([]int(nil), *clientWriteBufferSize...),
   825  			serverReadBufferSize:  append([]int(nil), *serverReadBufferSize...),
   826  			serverWriteBufferSize: append([]int(nil), *serverWriteBufferSize...),
   827  			sleepBetweenRPCs:      append([]time.Duration(nil), *sleepBetweenRPCs...),
   828  			recvBufferPools:       setRecvBufferPool(*recvBufferPool),
   829  			sharedWriteBuffer:     setToggleMode(*sharedWriteBuffer),
   830  		},
   831  	}
   832  
   833  	if len(*reqPayloadCurveFiles) == 0 {
   834  		if len(opts.features.reqSizeBytes) == 0 {
   835  			opts.features.reqSizeBytes = defaultReqSizeBytes
   836  		}
   837  	} else {
   838  		if len(opts.features.reqSizeBytes) != 0 {
   839  			log.Fatalf("you may not specify -reqPayloadCurveFiles and -reqSizeBytes at the same time")
   840  		}
   841  		if len(opts.features.enablePreloader) != 0 {
   842  			log.Fatalf("you may not specify -reqPayloadCurveFiles and -preloader at the same time")
   843  		}
   844  		for _, file := range *reqPayloadCurveFiles {
   845  			pc, err := stats.NewPayloadCurve(file)
   846  			if err != nil {
   847  				log.Fatalf("cannot load payload curve file %s: %v", file, err)
   848  			}
   849  			opts.features.reqPayloadCurves = append(opts.features.reqPayloadCurves, pc)
   850  		}
   851  		opts.features.reqSizeBytes = nil
   852  	}
   853  	if len(*respPayloadCurveFiles) == 0 {
   854  		if len(opts.features.respSizeBytes) == 0 {
   855  			opts.features.respSizeBytes = defaultRespSizeBytes
   856  		}
   857  	} else {
   858  		if len(opts.features.respSizeBytes) != 0 {
   859  			log.Fatalf("you may not specify -respPayloadCurveFiles and -respSizeBytes at the same time")
   860  		}
   861  		if len(opts.features.enablePreloader) != 0 {
   862  			log.Fatalf("you may not specify -respPayloadCurveFiles and -preloader at the same time")
   863  		}
   864  		for _, file := range *respPayloadCurveFiles {
   865  			pc, err := stats.NewPayloadCurve(file)
   866  			if err != nil {
   867  				log.Fatalf("cannot load payload curve file %s: %v", file, err)
   868  			}
   869  			opts.features.respPayloadCurves = append(opts.features.respPayloadCurves, pc)
   870  		}
   871  		opts.features.respSizeBytes = nil
   872  	}
   873  
    874  	// Re-write latency, kbps and mtu if network mode is set.
   875  	if network, ok := networks[opts.networkMode]; ok {
   876  		opts.features.readLatencies = []time.Duration{network.Latency}
   877  		opts.features.readKbps = []int{network.Kbps}
   878  		opts.features.readMTU = []int{network.MTU}
   879  	}
   880  	return opts
   881  }
   882  
   883  func setToggleMode(val string) []bool {
   884  	switch val {
   885  	case toggleModeOn:
   886  		return []bool{true}
   887  	case toggleModeOff:
   888  		return []bool{false}
   889  	case toggleModeBoth:
   890  		return []bool{false, true}
   891  	default:
   892  		// This should never happen because a wrong value passed to this flag would
   893  		// be caught during flag.Parse().
   894  		return []bool{}
   895  	}
   896  }
   897  
   898  func setCompressorMode(val string) []string {
   899  	switch val {
   900  	case compModeNop, compModeGzip, compModeOff:
   901  		return []string{val}
   902  	case compModeAll:
   903  		return []string{compModeNop, compModeGzip, compModeOff}
   904  	default:
   905  		// This should never happen because a wrong value passed to this flag would
   906  		// be caught during flag.Parse().
   907  		return []string{}
   908  	}
   909  }
   910  
   911  func setRecvBufferPool(val string) []string {
   912  	switch val {
   913  	case recvBufferPoolNil, recvBufferPoolSimple:
   914  		return []string{val}
   915  	case recvBufferPoolAll:
   916  		return []string{recvBufferPoolNil, recvBufferPoolSimple}
   917  	default:
   918  		// This should never happen because a wrong value passed to this flag would
   919  		// be caught during flag.Parse().
   920  		return []string{}
   921  	}
   922  }
   923  
   924  func main() {
   925  	opts := processFlags()
   926  	before(opts)
   927  
   928  	s := stats.NewStats(numStatsBuckets)
   929  	featuresNum := makeFeaturesNum(opts)
   930  	sf := sharedFeatures(featuresNum)
   931  
   932  	var (
   933  		start  = func(mode string, bf stats.Features) { s.StartRun(mode, bf, sf) }
   934  		stop   = func(count uint64) { s.EndRun(count) }
   935  		ucStop = func(req uint64, resp uint64) { s.EndUnconstrainedRun(req, resp) }
   936  	)
   937  
   938  	for _, bf := range opts.generateFeatures(featuresNum) {
   939  		grpc.EnableTracing = bf.EnableTrace
   940  		if bf.EnableChannelz {
   941  			channelz.TurnOn()
   942  		}
   943  		if opts.rModes.unary {
   944  			unaryBenchmark(start, stop, bf, s)
   945  		}
   946  		if opts.rModes.streaming {
   947  			streamBenchmark(start, stop, bf, s)
   948  		}
   949  		if opts.rModes.unconstrained {
   950  			unconstrainedStreamBenchmark(start, ucStop, bf)
   951  		}
   952  	}
   953  	after(opts, s.GetResults())
   954  }
   955  
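         // before enables memory and CPU profiling as requested by the
         // corresponding flags.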
   956  func before(opts *benchOpts) {
   957  	if opts.memProfile != "" {
   958  		runtime.MemProfileRate = opts.memProfileRate
   959  	}
   960  	if opts.cpuProfile != "" {
   961  		f, err := os.Create(opts.cpuProfile)
   962  		if err != nil {
   963  			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
   964  			return
   965  		}
   966  		if err := pprof.StartCPUProfile(f); err != nil {
   967  			fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err)
   968  			f.Close()
   969  			return
   970  		}
   971  	}
   972  }
   973  
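         // after stops the CPU profile, writes the heap profile, and gob-encodes
         // the benchmark results to the result file, as configured by the flags.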
   974  func after(opts *benchOpts, data []stats.BenchResults) {
   975  	if opts.cpuProfile != "" {
   976  		pprof.StopCPUProfile() // flushes profile to disk
   977  	}
   978  	if opts.memProfile != "" {
   979  		f, err := os.Create(opts.memProfile)
   980  		if err != nil {
   981  			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
   982  			os.Exit(2)
   983  		}
   984  		runtime.GC() // materialize all statistics
   985  		if err = pprof.WriteHeapProfile(f); err != nil {
   986  			fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", opts.memProfile, err)
   987  			os.Exit(2)
   988  		}
   989  		f.Close()
   990  	}
   991  	if opts.benchmarkResultFile != "" {
   992  		f, err := os.Create(opts.benchmarkResultFile)
   993  		if err != nil {
   994  			log.Fatalf("testing: can't write benchmark result %s: %s\n", opts.benchmarkResultFile, err)
   995  		}
   996  		dataEncoder := gob.NewEncoder(f)
   997  		dataEncoder.Encode(data)
   998  		f.Close()
   999  	}
  1000  }
  1001  
  1002  // nopCompressor is a compressor that just copies data.
  1003  type nopCompressor struct{}
  1004  
  1005  func (nopCompressor) Do(w io.Writer, p []byte) error {
  1006  	n, err := w.Write(p)
  1007  	if err != nil {
  1008  		return err
  1009  	}
  1010  	if n != len(p) {
  1011  		return fmt.Errorf("nopCompressor.Write: wrote %d bytes; want %d", n, len(p))
  1012  	}
  1013  	return nil
  1014  }
  1015  
  1016  func (nopCompressor) Type() string { return compModeNop }
  1017  
  1018  // nopDecompressor is a decompressor that just copies data.
  1019  type nopDecompressor struct{}
  1020  
  1021  func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return io.ReadAll(r) }
  1022  func (nopDecompressor) Type() string                   { return compModeNop }