google.golang.org/grpc@v1.62.1/test/end2end_test.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package test
    20  
    21  import (
    22  	"bufio"
    23  	"bytes"
    24  	"context"
    25  	"crypto/tls"
    26  	"encoding/json"
    27  	"errors"
    28  	"flag"
    29  	"fmt"
    30  	"io"
    31  	"math"
    32  	"net"
    33  	"net/http"
    34  	"os"
    35  	"reflect"
    36  	"runtime"
    37  	"strings"
    38  	"sync"
    39  	"sync/atomic"
    40  	"syscall"
    41  	"testing"
    42  	"time"
    43  
    44  	"golang.org/x/net/http2"
    45  	"golang.org/x/net/http2/hpack"
    46  	"google.golang.org/grpc"
    47  	"google.golang.org/grpc/balancer"
    48  	"google.golang.org/grpc/balancer/roundrobin"
    49  	"google.golang.org/grpc/codes"
    50  	"google.golang.org/grpc/connectivity"
    51  	"google.golang.org/grpc/credentials"
    52  	"google.golang.org/grpc/credentials/insecure"
    53  	"google.golang.org/grpc/health"
    54  	"google.golang.org/grpc/internal"
    55  	"google.golang.org/grpc/internal/binarylog"
    56  	"google.golang.org/grpc/internal/channelz"
    57  	"google.golang.org/grpc/internal/grpcsync"
    58  	"google.golang.org/grpc/internal/grpctest"
    59  	"google.golang.org/grpc/internal/stubserver"
    60  	"google.golang.org/grpc/internal/testutils"
    61  	"google.golang.org/grpc/internal/transport"
    62  	"google.golang.org/grpc/metadata"
    63  	"google.golang.org/grpc/peer"
    64  	"google.golang.org/grpc/resolver"
    65  	"google.golang.org/grpc/resolver/manual"
    66  	"google.golang.org/grpc/serviceconfig"
    67  	"google.golang.org/grpc/stats"
    68  	"google.golang.org/grpc/status"
    69  	"google.golang.org/grpc/tap"
    70  	"google.golang.org/grpc/test/bufconn"
    71  	"google.golang.org/grpc/testdata"
    72  
    73  	spb "google.golang.org/genproto/googleapis/rpc/status"
    74  	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
    75  	healthpb "google.golang.org/grpc/health/grpc_health_v1"
    76  	testgrpc "google.golang.org/grpc/interop/grpc_testing"
    77  	testpb "google.golang.org/grpc/interop/grpc_testing"
    78  	"google.golang.org/protobuf/proto"
    79  	"google.golang.org/protobuf/types/known/anypb"
    80  
    81  	_ "google.golang.org/grpc/encoding/gzip"
    82  )
    83  
// defaultHealthService is the fully-qualified name of the standard gRPC
// health-checking service, used when setting/querying serving status.
const defaultHealthService = "grpc.health.v1.Health"
    85  
// init turns on channelz data collection for all tests in this package and
// registers the test-only balancer used to exercise picker-blocking RPCs.
func init() {
	channelz.TurnOn()
	balancer.Register(triggerRPCBlockPickerBalancerBuilder{})
}
    90  
// s embeds grpctest.Tester so that every Test* method defined on it is
// discovered and run through the grpctest harness.
type s struct {
	grpctest.Tester
}
    94  
// Test is the single go-test entry point; it dispatches to all subtest
// methods on s via the grpctest harness.
func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}
    98  
// Shared metadata fixtures used across the header/trailer propagation tests,
// plus the rich status error returned when a client dials with failAppUA.
var (
	// For headers:
	testMetadata = metadata.MD{
		"key1":     []string{"value1"},
		"key2":     []string{"value2"},
		"key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})}, // -bin suffix: binary-safe values
	}
	testMetadata2 = metadata.MD{
		"key1": []string{"value12"},
		"key2": []string{"value22"},
	}
	// For trailers:
	testTrailerMetadata = metadata.MD{
		"tkey1":     []string{"trailerValue1"},
		"tkey2":     []string{"trailerValue2"},
		"tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})},
	}
	testTrailerMetadata2 = metadata.MD{
		"tkey1": []string{"trailerValue12"},
		"tkey2": []string{"trailerValue22"},
	}
	// capital "Key" is illegal in HTTP/2.
	malformedHTTP2Metadata = metadata.MD{
		"Key": []string{"foo"},
	}
	testAppUA = "myApp1/1.0 myApp2/0.9"
	// failAppUA is the user-agent prefix that makes server handlers fail
	// the RPC on purpose so clients can assert the returned error.
	failAppUA     = "fail-this-RPC"
	detailedError = status.ErrorProto(&spb.Status{
		Code:    int32(codes.DataLoss),
		Message: "error for testing: " + failAppUA,
		Details: []*anypb.Any{{
			TypeUrl: "url",
			Value:   []byte{6, 0, 0, 6, 1, 3},
		}},
	})
)
   135  
// raceMode reports whether the race detector build is active.
var raceMode bool // set by race.go in race mode
   137  
// testServer is the TestService implementation used by these end-to-end
// tests; its fields are knobs that inject failures or alter how headers
// and trailers are emitted.
type testServer struct {
	testgrpc.UnimplementedTestServiceServer

	security           string // indicate the authentication protocol used by this server.
	earlyFail          bool   // whether to error out the execution of a service handler prematurely.
	setAndSendHeader   bool   // whether to call setHeader and sendHeader.
	setHeaderOnly      bool   // whether to only call setHeader, not sendHeader.
	multipleSetTrailer bool   // whether to call setTrailer multiple times.
	unaryCallSleepTime time.Duration // artificial delay injected into each UnaryCall.
}
   148  
   149  func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
   150  	if md, ok := metadata.FromIncomingContext(ctx); ok {
   151  		// For testing purpose, returns an error if user-agent is failAppUA.
   152  		// To test that client gets the correct error.
   153  		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
   154  			return nil, detailedError
   155  		}
   156  		var str []string
   157  		for _, entry := range md["user-agent"] {
   158  			str = append(str, "ua", entry)
   159  		}
   160  		grpc.SendHeader(ctx, metadata.Pairs(str...))
   161  	}
   162  	return new(testpb.Empty), nil
   163  }
   164  
   165  func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
   166  	if size < 0 {
   167  		return nil, fmt.Errorf("requested a response with invalid length %d", size)
   168  	}
   169  	body := make([]byte, size)
   170  	switch t {
   171  	case testpb.PayloadType_COMPRESSABLE:
   172  	default:
   173  		return nil, fmt.Errorf("unsupported payload type: %d", t)
   174  	}
   175  	return &testpb.Payload{
   176  		Type: t,
   177  		Body: body,
   178  	}, nil
   179  }
   180  
// UnaryCall echoes incoming metadata back as headers/trailers according to
// the server's knobs, validates peer and TLS auth info when security is
// configured, sleeps unaryCallSleepTime, and returns a payload of the
// requested type and size.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		// Every request is expected to carry the :authority pseudo-header.
		if _, exists := md[":authority"]; !exists {
			return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		if s.setAndSendHeader {
			// Exercise SetHeader followed by an explicit SendHeader.
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SendHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			// Exercise multiple SetHeader calls with no explicit send.
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SetHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else {
			if err := grpc.SendHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
			}
		}
		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
		}
		if s.multipleSetTrailer {
			if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata2, err)
			}
		}
	}
	pr, ok := peer.FromContext(ctx)
	if !ok {
		return nil, status.Error(codes.DataLoss, "failed to get peer from ctx")
	}
	if pr.Addr == net.Addr(nil) {
		return nil, status.Error(codes.DataLoss, "failed to get peer address")
	}
	if s.security != "" {
		// Check Auth info
		var authType, serverName string
		switch info := pr.AuthInfo.(type) {
		case credentials.TLSInfo:
			authType = info.AuthType()
			serverName = info.State.ServerName
		default:
			return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type")
		}
		if authType != s.security {
			return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security)
		}
		// The TLS test certificates are issued for x.test.example.com.
		if serverName != "x.test.example.com" {
			return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName)
		}
	}
	// Simulate some service delay.
	time.Sleep(s.unaryCallSleepTime)

	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
	if err != nil {
		return nil, err
	}

	return &testpb.SimpleResponse{
		Payload: payload,
	}, nil
}
   251  
// StreamingOutputCall sends one response per entry in the request's
// ResponseParameters, honoring each entry's interval delay and size. It
// fails the RPC up front if the :authority header is missing or the
// user-agent is the designated failure string.
func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error {
	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
		if _, exists := md[":authority"]; !exists {
			return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		// For testing purpose, returns an error if user-agent is failAppUA.
		// To test that client gets the correct error.
		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
			return status.Error(codes.DataLoss, "error for testing: "+failAppUA)
		}
	}
	cs := args.GetResponseParameters()
	for _, c := range cs {
		// Honor the requested inter-message delay, if any.
		if us := c.GetIntervalUs(); us > 0 {
			time.Sleep(time.Duration(us) * time.Microsecond)
		}

		payload, err := newPayload(args.GetResponseType(), c.GetSize())
		if err != nil {
			return err
		}

		if err := stream.Send(&testpb.StreamingOutputCallResponse{
			Payload: payload,
		}); err != nil {
			return err
		}
	}
	return nil
}
   282  
// StreamingInputCall sums the payload sizes of all received messages and
// replies with the aggregate once the client closes its send side. When
// earlyFail is set, it aborts with NotFound after the first message.
func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error {
	var sum int
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// Client finished sending; report the total.
			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
				AggregatedPayloadSize: int32(sum),
			})
		}
		if err != nil {
			return err
		}
		p := in.GetPayload().GetBody()
		sum += len(p)
		// Inject a premature handler failure for tests that need it.
		if s.earlyFail {
			return status.Error(codes.NotFound, "not found")
		}
	}
}
   302  
// FullDuplexCall first emits headers/trailers per the server's knobs, then
// loops: for each received request it sends the responses described by its
// ResponseParameters. ResourceExhausted errors from Recv/Send are rewritten
// to Internal to support testSvrWriteStatusEarlyWrite.
func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if s.setAndSendHeader {
			// Exercise SetHeader followed by an explicit SendHeader.
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SendHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			// Exercise multiple SetHeader calls with no explicit send.
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SetHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else {
			if err := stream.SendHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
			}
		}
		stream.SetTrailer(testTrailerMetadata)
		if s.multipleSetTrailer {
			stream.SetTrailer(testTrailerMetadata2)
		}
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			// to facilitate testSvrWriteStatusEarlyWrite
			if status.Code(err) == codes.ResourceExhausted {
				return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
			}
			return err
		}
		cs := in.GetResponseParameters()
		for _, c := range cs {
			// Honor the requested inter-message delay, if any.
			if us := c.GetIntervalUs(); us > 0 {
				time.Sleep(time.Duration(us) * time.Microsecond)
			}

			payload, err := newPayload(in.GetResponseType(), c.GetSize())
			if err != nil {
				return err
			}

			if err := stream.Send(&testpb.StreamingOutputCallResponse{
				Payload: payload,
			}); err != nil {
				// to facilitate testSvrWriteStatusEarlyWrite
				if status.Code(err) == codes.ResourceExhausted {
					return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
				}
				return err
			}
		}
	}
}
   366  
   367  func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallServer) error {
   368  	var msgBuf []*testpb.StreamingOutputCallRequest
   369  	for {
   370  		in, err := stream.Recv()
   371  		if err == io.EOF {
   372  			// read done.
   373  			break
   374  		}
   375  		if err != nil {
   376  			return err
   377  		}
   378  		msgBuf = append(msgBuf, in)
   379  	}
   380  	for _, m := range msgBuf {
   381  		cs := m.GetResponseParameters()
   382  		for _, c := range cs {
   383  			if us := c.GetIntervalUs(); us > 0 {
   384  				time.Sleep(time.Duration(us) * time.Microsecond)
   385  			}
   386  
   387  			payload, err := newPayload(m.GetResponseType(), c.GetSize())
   388  			if err != nil {
   389  				return err
   390  			}
   391  
   392  			if err := stream.Send(&testpb.StreamingOutputCallResponse{
   393  				Payload: payload,
   394  			}); err != nil {
   395  				return err
   396  			}
   397  		}
   398  	}
   399  	return nil
   400  }
   401  
// env describes one client/server test environment: transport network,
// security scheme, whether to serve via the http.Handler transport, and
// which balancer the client configures.
type env struct {
	name         string
	network      string // The type of network such as tcp, unix, etc.
	security     string // The security protocol such as TLS, SSH, etc.
	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
	balancer     string // One of "round_robin", "pick_first", or "".
	customDialer func(string, string, time.Duration) (net.Conn, error) // overrides the default net.DialTimeout when set
}
   410  
   411  func (e env) runnable() bool {
   412  	if runtime.GOOS == "windows" && e.network == "unix" {
   413  		return false
   414  	}
   415  	return true
   416  }
   417  
   418  func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
   419  	if e.customDialer != nil {
   420  		return e.customDialer(e.network, addr, timeout)
   421  	}
   422  	return net.DialTimeout(e.network, addr, timeout)
   423  }
   424  
// The concrete environments tests can run under; allEnv is the full set
// iterated by listTestEnv.
var (
	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp"}
	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls"}
	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
	noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"}
	allEnv        = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv}
)
   434  
   435  var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.")
   436  
   437  func listTestEnv() (envs []env) {
   438  	if *onlyEnv != "" {
   439  		for _, e := range allEnv {
   440  			if e.name == *onlyEnv {
   441  				if !e.runnable() {
   442  					panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS))
   443  				}
   444  				return []env{e}
   445  			}
   446  		}
   447  		panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv))
   448  	}
   449  	for _, e := range allEnv {
   450  		if e.runnable() {
   451  			envs = append(envs, e)
   452  		}
   453  	}
   454  	return envs
   455  }
   456  
// test is an end-to-end test. It should be created with the newTest
// func, modified as needed, and then started with its startServer method.
// It should be cleaned up with the tearDown method.
type test struct {
	// The following are setup in newTest().
	t      *testing.T
	e      env
	ctx    context.Context // valid for life of test, before tearDown
	cancel context.CancelFunc

	// The following knobs are for the server-side, and should be set after
	// calling newTest() and before calling startServer().

	// whether or not to expose the server's health via the default health
	// service implementation.
	enableHealthServer bool
	// In almost all cases, one should set the 'enableHealthServer' flag above to
	// expose the server's health using the default health service
	// implementation. This should only be used when a non-default health service
	// implementation is required.
	healthServer            healthgrpc.HealthServer
	maxStream               uint32
	tapHandle               tap.ServerInHandle
	maxServerMsgSize        *int
	maxServerReceiveMsgSize *int
	maxServerSendMsgSize    *int
	maxServerHeaderListSize *uint32
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	serverCompression           bool
	unknownHandler              grpc.StreamHandler
	unaryServerInt              grpc.UnaryServerInterceptor
	streamServerInt             grpc.StreamServerInterceptor
	serverInitialWindowSize     int32
	serverInitialConnWindowSize int32
	customServerOptions         []grpc.ServerOption

	// The following knobs are for the client-side, and should be set after
	// calling newTest() and before calling clientConn().
	maxClientMsgSize        *int
	maxClientReceiveMsgSize *int
	maxClientSendMsgSize    *int
	maxClientHeaderListSize *uint32
	userAgent               string
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	clientCompression bool
	// Used to test the new compressor registration API UseCompressor.
	clientUseCompression bool
	// clientNopCompression is set to create a compressor whose type is not supported.
	clientNopCompression        bool
	unaryClientInt              grpc.UnaryClientInterceptor
	streamClientInt             grpc.StreamClientInterceptor
	clientInitialWindowSize     int32
	clientInitialConnWindowSize int32
	perRPCCreds                 credentials.PerRPCCredentials
	customDialOptions           []grpc.DialOption
	resolverScheme              string

	// These are set once startServer is called. The common case is to have
	// only one testServer.
	srv     stopper
	hSrv    healthgrpc.HealthServer
	srvAddr string

	// These are set once startServers is called.
	srvs     []stopper
	hSrvs    []healthgrpc.HealthServer
	srvAddrs []string

	cc          *grpc.ClientConn // nil until requested via clientConn
	restoreLogs func()           // nil unless declareLogNoise is used
}
   528  
// stopper abstracts over *grpc.Server and the wrapHS http.Server wrapper,
// both of which support immediate and graceful shutdown.
type stopper interface {
	Stop()
	GracefulStop()
}
   533  
// tearDown releases everything a test acquired: it cancels the test
// context, closes the client connection, restores log filtering, and stops
// the server(s). Fields are nil'd so a second call is a no-op for them.
func (te *test) tearDown() {
	if te.cancel != nil {
		te.cancel()
		te.cancel = nil
	}

	if te.cc != nil {
		te.cc.Close()
		te.cc = nil
	}

	if te.restoreLogs != nil {
		te.restoreLogs()
		te.restoreLogs = nil
	}

	if te.srv != nil {
		te.srv.Stop()
	}
	for _, s := range te.srvs {
		s.Stop()
	}
}
   557  
   558  // newTest returns a new test using the provided testing.T and
   559  // environment.  It is returned with default values. Tests should
   560  // modify it before calling its startServer and clientConn methods.
   561  func newTest(t *testing.T, e env) *test {
   562  	te := &test{
   563  		t:         t,
   564  		e:         e,
   565  		maxStream: math.MaxUint32,
   566  	}
   567  	te.ctx, te.cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
   568  	return te
   569  }
   570  
// listenAndServe assembles server options from the test's knobs, creates a
// listener via the supplied listen func, registers the TestService and
// (optionally) a health server, and starts serving in a goroutine. For
// httpHandler environments it serves the *grpc.Server through an
// http2-configured http.Server over TLS instead. It records the server,
// health server, and dialable address on te, and returns the listener.
func (te *test) listenAndServe(ts testgrpc.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
	te.t.Helper()
	te.t.Logf("Running test in %s environment...", te.e.name)
	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
	if te.maxServerMsgSize != nil {
		sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize))
	}
	if te.maxServerReceiveMsgSize != nil {
		sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize))
	}
	if te.maxServerSendMsgSize != nil {
		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
	}
	if te.maxServerHeaderListSize != nil {
		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
	}
	if te.tapHandle != nil {
		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
	}
	if te.serverCompression {
		// Deprecated compressor APIs, kept to verify they still work.
		sopts = append(sopts,
			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.unaryServerInt != nil {
		sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt))
	}
	if te.streamServerInt != nil {
		sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt))
	}
	if te.unknownHandler != nil {
		sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler))
	}
	if te.serverInitialWindowSize > 0 {
		sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize))
	}
	if te.serverInitialConnWindowSize > 0 {
		sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize))
	}
	la := "localhost:0"
	switch te.e.network {
	case "unix":
		// Fresh, collision-free socket path; remove any stale file first.
		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
		syscall.Unlink(la)
	}
	lis, err := listen(te.e.network, la)
	if err != nil {
		te.t.Fatalf("Failed to listen: %v", err)
	}
	if te.e.security == "tls" {
		creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
		if err != nil {
			te.t.Fatalf("Failed to generate credentials %v", err)
		}
		sopts = append(sopts, grpc.Creds(creds))
	}
	sopts = append(sopts, te.customServerOptions...)
	s := grpc.NewServer(sopts...)
	if ts != nil {
		testgrpc.RegisterTestServiceServer(s, ts)
	}

	// Create a new default health server if enableHealthServer is set, or use
	// the provided one.
	hs := te.healthServer
	if te.enableHealthServer {
		hs = health.NewServer()
	}
	if hs != nil {
		healthgrpc.RegisterHealthServer(s, hs)
	}

	// For TCP, rewrite the address to localhost:<chosen-port> since the
	// listener was bound to port 0.
	addr := la
	switch te.e.network {
	case "unix":
	default:
		_, port, err := net.SplitHostPort(lis.Addr().String())
		if err != nil {
			te.t.Fatalf("Failed to parse listener address: %v", err)
		}
		addr = "localhost:" + port
	}

	te.srv = s
	te.hSrv = hs
	te.srvAddr = addr

	if te.e.httpHandler {
		if te.e.security != "tls" {
			te.t.Fatalf("unsupported environment settings")
		}
		cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
		if err != nil {
			te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err)
		}
		// Serve the gRPC server as an http.Handler over http2+TLS.
		hs := &http.Server{
			Handler:   s,
			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		}
		if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil {
			te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err)
		}
		te.srv = wrapHS{hs}
		tlsListener := tls.NewListener(lis, hs.TLSConfig)
		go hs.Serve(tlsListener)
		return lis
	}

	go s.Serve(lis)
	return lis
}
   683  
// wrapHS adapts an *http.Server (serving a *grpc.Server as its Handler)
// to the stopper interface used by tearDown.
type wrapHS struct {
	s *http.Server
}
   687  
// GracefulStop shuts the HTTP server down gracefully, bounded by the
// default test timeout so a hung connection cannot stall teardown forever.
func (w wrapHS) GracefulStop() {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	w.s.Shutdown(ctx)
}
   693  
// Stop closes the HTTP server immediately and also stops the wrapped
// *grpc.Server so its in-flight handlers are torn down.
func (w wrapHS) Stop() {
	w.s.Close()
	w.s.Handler.(*grpc.Server).Stop()
}
   698  
// startServerWithConnControl starts the server on a listener that lets the
// test inspect/control accepted connections, and returns that wrapper.
func (te *test) startServerWithConnControl(ts testgrpc.TestServiceServer) *listenerWrapper {
	l := te.listenAndServe(ts, listenWithConnControl)
	return l.(*listenerWrapper)
}
   703  
// startServer starts a gRPC server exposing the provided TestService
// implementation. Callers should defer a call to te.tearDown to clean up.
func (te *test) startServer(ts testgrpc.TestServiceServer) {
	te.t.Helper()
	te.listenAndServe(ts, net.Listen)
}
   710  
// startServers starts 'num' gRPC servers exposing the provided TestService.
// Each server's handle, health server, and address are appended to the
// plural fields, and the singular fields are reset between iterations.
// NOTE(review): the assertion te.srv.(*grpc.Server) assumes the non-
// httpHandler path (wrapHS would not satisfy it) — confirm callers never
// use this with the handler-tls environment.
func (te *test) startServers(ts testgrpc.TestServiceServer, num int) {
	for i := 0; i < num; i++ {
		te.startServer(ts)
		te.srvs = append(te.srvs, te.srv.(*grpc.Server))
		te.hSrvs = append(te.hSrvs, te.hSrv)
		te.srvAddrs = append(te.srvAddrs, te.srvAddr)
		te.srv = nil
		te.hSrv = nil
		te.srvAddr = ""
	}
}
   723  
   724  // setHealthServingStatus is a helper function to set the health status.
   725  func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
   726  	hs, ok := te.hSrv.(*health.Server)
   727  	if !ok {
   728  		panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs))
   729  	}
   730  	hs.SetServingStatus(service, status)
   731  }
   732  
// nopCompressor embeds a real gzip Compressor but reports an unregistered
// type name, to exercise the unsupported-compressor error path.
type nopCompressor struct {
	grpc.Compressor
}
   736  
// newNopCompressor creates a compressor to test the case that type is not supported.
func newNopCompressor() grpc.Compressor {
	return &nopCompressor{grpc.NewGZIPCompressor()}
}
   741  
// Type overrides the embedded gzip compressor's name with one no peer
// recognizes.
func (c *nopCompressor) Type() string {
	return "nop"
}
   745  
// nopDecompressor mirrors nopCompressor for the receive side.
type nopDecompressor struct {
	grpc.Decompressor
}
   749  
// newNopDecompressor creates a decompressor to test the case that type is not supported.
func newNopDecompressor() grpc.Decompressor {
	return &nopDecompressor{grpc.NewGZIPDecompressor()}
}
   754  
// Type overrides the embedded gzip decompressor's name with one no peer
// recognizes.
func (d *nopDecompressor) Type() string {
	return "nop"
}
   758  
// configDial translates the test's client-side knobs into grpc.DialOptions,
// appends any custom options, and returns the options plus the resolver
// scheme prefix ("passthrough:///" by default) to prepend to the target.
func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
	opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent))

	if te.clientCompression {
		// Deprecated compressor APIs, kept to verify they still work.
		opts = append(opts,
			grpc.WithCompressor(grpc.NewGZIPCompressor()),
			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.clientUseCompression {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
	}
	if te.clientNopCompression {
		// Unregistered "nop" compressor type, to exercise failure paths.
		opts = append(opts,
			grpc.WithCompressor(newNopCompressor()),
			grpc.WithDecompressor(newNopDecompressor()),
		)
	}
	if te.unaryClientInt != nil {
		opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt))
	}
	if te.streamClientInt != nil {
		opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt))
	}
	if te.maxClientMsgSize != nil {
		opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize))
	}
	if te.maxClientReceiveMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize)))
	}
	if te.maxClientSendMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
	}
	if te.maxClientHeaderListSize != nil {
		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
	}
	switch te.e.security {
	case "tls":
		creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com")
		if err != nil {
			te.t.Fatalf("Failed to load credentials: %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	case "empty":
		// Don't add any transport creds option.
	default:
		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
	}
	// TODO(bar) switch balancer case "pick_first".
	var scheme string
	if te.resolverScheme == "" {
		scheme = "passthrough:///"
	} else {
		scheme = te.resolverScheme + ":///"
	}
	if te.e.balancer != "" {
		opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, te.e.balancer)))
	}
	if te.clientInitialWindowSize > 0 {
		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
	}
	if te.clientInitialConnWindowSize > 0 {
		opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize))
	}
	if te.perRPCCreds != nil {
		opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds))
	}
	// Placeholder target for tests that never start a server.
	if te.srvAddr == "" {
		te.srvAddr = "client.side.only.test"
	}
	opts = append(opts, te.customDialOptions...)
	return opts, scheme
}
   832  
   833  func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) {
   834  	if te.cc != nil {
   835  		return te.cc, nil
   836  	}
   837  	opts, scheme := te.configDial()
   838  	dw := &dialerWrapper{}
   839  	// overwrite the dialer before
   840  	opts = append(opts, grpc.WithDialer(dw.dialer))
   841  	var err error
   842  	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
   843  	if err != nil {
   844  		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
   845  	}
   846  	return te.cc, dw
   847  }
   848  
   849  func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn {
   850  	if te.cc != nil {
   851  		return te.cc
   852  	}
   853  	var scheme string
   854  	opts, scheme = te.configDial(opts...)
   855  	var err error
   856  	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
   857  	if err != nil {
   858  		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
   859  	}
   860  	return te.cc
   861  }
   862  
// declareLogNoise registers log phrases that are expected noise for this
// test, storing the returned restore callback in te.restoreLogs (presumably
// invoked during teardown to undo the suppression — see tearDown).
func (te *test) declareLogNoise(phrases ...string) {
	te.restoreLogs = declareLogNoise(te.t, phrases...)
}
   866  
   867  func (te *test) withServerTester(fn func(st *serverTester)) {
   868  	c, err := te.e.dialer(te.srvAddr, 10*time.Second)
   869  	if err != nil {
   870  		te.t.Fatal(err)
   871  	}
   872  	defer c.Close()
   873  	if te.e.security == "tls" {
   874  		c = tls.Client(c, &tls.Config{
   875  			InsecureSkipVerify: true,
   876  			NextProtos:         []string{http2.NextProtoTLS},
   877  		})
   878  	}
   879  	st := newServerTesterFromConn(te.t, c)
   880  	st.greet()
   881  	fn(st)
   882  }
   883  
// lazyConn wraps a net.Conn and, once beLazy is set to 1, delays each Write
// by one second to simulate an unresponsive connection (see Write below).
type lazyConn struct {
	net.Conn
	beLazy int32 // accessed atomically; 1 means "delay every write"
}
   888  
   889  // possible conn closed errors.
   890  const possibleConnResetMsg = "connection reset by peer"
   891  const possibleEOFMsg = "error reading from server: EOF"
   892  
   893  // isConnClosedErr checks the error msg for possible conn closed messages. There
   894  // is a raceyness in the timing of when TCP packets are sent from client to
   895  // server, and when we tell the server to stop, so we need to check for both of
   896  // these possible error messages:
   897  //  1. If the call to ss.S.Stop() causes the server's sockets to close while
   898  //     there's still in-fight data from the client on the TCP connection, then
   899  //     the kernel can send an RST back to the client (also see
   900  //     https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c).
   901  //     Note that while this condition is expected to be rare due to the
   902  //     test httpServer start synchronization, in theory it should be possible,
   903  //     e.g. if the client sends a BDP ping at the right time.
   904  //  2. If, for example, the call to ss.S.Stop() happens after the RPC headers
   905  //     have been received at the server, then the TCP connection can shutdown
   906  //     gracefully when the server's socket closes.
   907  //  3. If there is an actual io.EOF received because the client stopped the stream.
   908  func isConnClosedErr(err error) bool {
   909  	errContainsConnResetMsg := strings.Contains(err.Error(), possibleConnResetMsg)
   910  	errContainsEOFMsg := strings.Contains(err.Error(), possibleEOFMsg)
   911  
   912  	return errContainsConnResetMsg || errContainsEOFMsg || err == io.EOF
   913  }
   914  
   915  func (l *lazyConn) Write(b []byte) (int, error) {
   916  	if atomic.LoadInt32(&(l.beLazy)) == 1 {
   917  		time.Sleep(time.Second)
   918  	}
   919  	return l.Conn.Write(b)
   920  }
   921  
// TestContextDeadlineNotIgnored verifies that an RPC fails with
// DeadlineExceeded promptly even when the underlying connection has become
// unresponsive (simulated by lazyConn delaying every write).
func (s) TestContextDeadlineNotIgnored(t *testing.T) {
	e := noBalancerEnv
	var lc *lazyConn
	// Capture the dialed conn in a lazyConn so the test can slow it down later.
	e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) {
		conn, err := net.DialTimeout(network, addr, timeout)
		if err != nil {
			return nil, err
		}
		lc = &lazyConn{Conn: conn}
		return lc, nil
	}

	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	// Warm-up RPC must succeed; it also guarantees lc has been populated.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	cancel()
	// From now on every write on the connection sleeps for one second.
	atomic.StoreInt32(&(lc.beLazy), 1)
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer cancel()
	t1 := time.Now()
	// The RPC should fail with DeadlineExceeded, not hang behind slow writes.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err)
	}
	// Sanity-check the failure happened reasonably close to the deadline.
	if time.Since(t1) > 2*time.Second {
		t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline")
	}
}
   956  
// TestTimeoutOnDeadServer runs testTimeoutOnDeadServer in every configured
// test environment.
func (s) TestTimeoutOnDeadServer(t *testing.T) {
	for _, e := range listTestEnv() {
		testTimeoutOnDeadServer(t, e)
	}
}
   962  
// testTimeoutOnDeadServer verifies that once the server is stopped and the
// client has observed the lost connection, a wait-for-ready RPC with a short
// deadline fails with DeadlineExceeded instead of hanging.
func testTimeoutOnDeadServer(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Warm up: the first RPC must succeed while the server is alive.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Wait for the client to report READY, stop the server, then wait for the
	// client to notice the connection is gone.
	testutils.AwaitState(ctx, t, cc, connectivity.Ready)
	te.srv.Stop()
	testutils.AwaitNotState(ctx, t, cc, connectivity.Ready)
	ctx, cancel = context.WithTimeout(ctx, defaultTestShortTimeout)
	defer cancel()
	// With no server available, the wait-for-ready RPC should block until its
	// (short) deadline expires.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
	}
	awaitNewConnLogOutput()
}
   988  
   989  func (s) TestServerGracefulStopIdempotent(t *testing.T) {
   990  	for _, e := range listTestEnv() {
   991  		if e.name == "handler-tls" {
   992  			continue
   993  		}
   994  		testServerGracefulStopIdempotent(t, e)
   995  	}
   996  }
   997  
   998  func testServerGracefulStopIdempotent(t *testing.T, e env) {
   999  	te := newTest(t, e)
  1000  	te.userAgent = testAppUA
  1001  	te.startServer(&testServer{security: e.security})
  1002  	defer te.tearDown()
  1003  
  1004  	for i := 0; i < 3; i++ {
  1005  		te.srv.GracefulStop()
  1006  	}
  1007  }
  1008  
// TestDetailedConnectionCloseErrorPropagatesToRpcError verifies that when the
// server is stopped abruptly mid-RPC, the client's RPC error carries details
// about the specific connection error rather than a bare EOF.
func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) {
	rpcStartedOnServer := make(chan struct{})
	rpcDoneOnClient := make(chan struct{})
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
			// Signal the client that the handler is running, then park until
			// the client side of the test is finished.
			close(rpcStartedOnServer)
			<-rpcDoneOnClient
			return status.Error(codes.Internal, "arbitrary status")
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly
	// stop the server, killing the connection. The RPC error message should include details about the specific
	// connection error that was encountered.
	stream, err := ss.Client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
	}
	// Block until the RPC has been started on the server. This ensures that the ClientConn will find a healthy
	// connection for the RPC to go out on initially, and that the TCP connection will shut down strictly after
	// the RPC has been started on it.
	<-rpcStartedOnServer
	ss.S.Stop()
	// The precise behavior of this test is subject to raceyness around the timing
	// of when TCP packets are sent from client to server, and when we tell the
	// server to stop, so we need to account for both possible error messages.
	// Note that a bare io.EOF is explicitly rejected here even though
	// isConnClosedErr accepts it: this test demands a *detailed* error.
	if _, err := stream.Recv(); err == io.EOF || !isConnClosedErr(err) {
		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg)
	}
	close(rpcDoneOnClient)
}
  1047  
// TestFailFast runs testFailFast in every configured test environment.
func (s) TestFailFast(t *testing.T) {
	for _, e := range listTestEnv() {
		testFailFast(t, e)
	}
}
  1053  
// testFailFast verifies that after the server goes down, fail-fast (default)
// RPCs fail with Unavailable instead of blocking waiting for a connection.
func testFailFast(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Warm up: the first RPC must succeed while the server is alive.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Stop the server and tear down all the existing connections.
	te.srv.Stop()
	// Loop until the server teardown is propagated to the client.
	for {
		if err := ctx.Err(); err != nil {
			t.Fatalf("EmptyCall did not return UNAVAILABLE before timeout")
		}
		_, err := tc.EmptyCall(ctx, &testpb.Empty{})
		if status.Code(err) == codes.Unavailable {
			break
		}
		t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err)
		time.Sleep(10 * time.Millisecond)
	}
	// The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}
	if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}

	awaitNewConnLogOutput()
}
  1091  
  1092  func testServiceConfigSetup(t *testing.T, e env) *test {
  1093  	te := newTest(t, e)
  1094  	te.userAgent = testAppUA
  1095  	te.declareLogNoise(
  1096  		"Failed to dial : context canceled; please retry.",
  1097  	)
  1098  	return te
  1099  }
  1100  
  1101  func newInt(b int) (a *int) {
  1102  	return &b
  1103  }
  1104  
// TestGetMethodConfig verifies that the most specific matching methodConfig
// entry is applied to an RPC, and that a subsequent service config update
// takes effect. No server is ever started, so a wait-for-ready RPC hits its
// 1ms timeout (DeadlineExceeded) while a fail-fast RPC fails immediately
// (Unavailable).
func (s) TestGetMethodConfig(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	// EmptyCall matches the method-specific entry: waitForReady=true, 1ms timeout.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	tc := testgrpc.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// Update the config: EmptyCall now only matches the service-level entry
	// (waitForReady=false, no timeout).
	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "UnaryCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	// Make sure service config has been processed by grpc.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become fail-fast.
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
  1191  
// TestServiceConfigWaitForReady verifies the interaction between per-call
// wait-for-ready settings and the service config's waitForReady field. No
// server is started, so every RPC is expected to end in DeadlineExceeded via
// the 1ms service-config timeout.
func (s) TestServiceConfigWaitForReady(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": false,
            "timeout": ".001s"
        }
    ]
}`)})

	tc := testgrpc.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}

	// Generate a service config update.
	// Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
  1280  
// TestServiceConfigTimeout verifies that the effective RPC deadline is the
// minimum of the caller's context deadline and the service config's timeout.
// No server is started, so every wait-for-ready RPC ends in DeadlineExceeded.
func (s) TestServiceConfigTimeout(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": "3600s"
        }
    ]
}`)})

	tc := testgrpc.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
	var err error
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()

	// Generate a service config update.
	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".000000001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
  1376  
  1377  func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
  1378  	e := tcpClearRREnv
  1379  	r := manual.NewBuilderWithScheme("whatever")
  1380  
  1381  	// Setting up values and objects shared across all test cases.
  1382  	const smallSize = 1
  1383  	const largeSize = 1024
  1384  	const extraLargeSize = 2048
  1385  
  1386  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  1387  	if err != nil {
  1388  		t.Fatal(err)
  1389  	}
  1390  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  1391  	if err != nil {
  1392  		t.Fatal(err)
  1393  	}
  1394  	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
  1395  	if err != nil {
  1396  		t.Fatal(err)
  1397  	}
  1398  
  1399  	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
  1400  	te1 := testServiceConfigSetup(t, e)
  1401  	defer te1.tearDown()
  1402  
  1403  	te1.resolverScheme = r.Scheme()
  1404  	te1.startServer(&testServer{security: e.security})
  1405  	cc1 := te1.clientConn(grpc.WithResolvers(r))
  1406  
  1407  	addrs := []resolver.Address{{Addr: te1.srvAddr}}
  1408  	sc := parseServiceConfig(t, r, `{
  1409      "methodConfig": [
  1410          {
  1411              "name": [
  1412                  {
  1413                      "service": "grpc.testing.TestService",
  1414                      "method": "UnaryCall"
  1415                  },
  1416                  {
  1417                      "service": "grpc.testing.TestService",
  1418                      "method": "FullDuplexCall"
  1419                  }
  1420              ],
  1421              "maxRequestMessageBytes": 2048,
  1422              "maxResponseMessageBytes": 2048
  1423          }
  1424      ]
  1425  }`)
  1426  	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
  1427  	tc := testgrpc.NewTestServiceClient(cc1)
  1428  
  1429  	req := &testpb.SimpleRequest{
  1430  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  1431  		ResponseSize: int32(extraLargeSize),
  1432  		Payload:      smallPayload,
  1433  	}
  1434  
  1435  	for {
  1436  		if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1437  			break
  1438  		}
  1439  		time.Sleep(time.Millisecond)
  1440  	}
  1441  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1442  	defer cancel()
  1443  	// Test for unary RPC recv.
  1444  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
  1445  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1446  	}
  1447  
  1448  	// Test for unary RPC send.
  1449  	req.Payload = extraLargePayload
  1450  	req.ResponseSize = int32(smallSize)
  1451  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1452  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1453  	}
  1454  
  1455  	// Test for streaming RPC recv.
  1456  	respParam := []*testpb.ResponseParameters{
  1457  		{
  1458  			Size: int32(extraLargeSize),
  1459  		},
  1460  	}
  1461  	sreq := &testpb.StreamingOutputCallRequest{
  1462  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1463  		ResponseParameters: respParam,
  1464  		Payload:            smallPayload,
  1465  	}
  1466  	stream, err := tc.FullDuplexCall(te1.ctx)
  1467  	if err != nil {
  1468  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1469  	}
  1470  	if err = stream.Send(sreq); err != nil {
  1471  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1472  	}
  1473  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1474  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1475  	}
  1476  
  1477  	// Test for streaming RPC send.
  1478  	respParam[0].Size = int32(smallSize)
  1479  	sreq.Payload = extraLargePayload
  1480  	stream, err = tc.FullDuplexCall(te1.ctx)
  1481  	if err != nil {
  1482  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1483  	}
  1484  	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1485  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1486  	}
  1487  
  1488  	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
  1489  	te2 := testServiceConfigSetup(t, e)
  1490  	te2.resolverScheme = r.Scheme()
  1491  	te2.maxClientReceiveMsgSize = newInt(1024)
  1492  	te2.maxClientSendMsgSize = newInt(1024)
  1493  
  1494  	te2.startServer(&testServer{security: e.security})
  1495  	defer te2.tearDown()
  1496  	cc2 := te2.clientConn(grpc.WithResolvers(r))
  1497  	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc})
  1498  	tc = testgrpc.NewTestServiceClient(cc2)
  1499  
  1500  	for {
  1501  		if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1502  			break
  1503  		}
  1504  		time.Sleep(time.Millisecond)
  1505  	}
  1506  
  1507  	// Test for unary RPC recv.
  1508  	req.Payload = smallPayload
  1509  	req.ResponseSize = int32(largeSize)
  1510  
  1511  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
  1512  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1513  	}
  1514  
  1515  	// Test for unary RPC send.
  1516  	req.Payload = largePayload
  1517  	req.ResponseSize = int32(smallSize)
  1518  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1519  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1520  	}
  1521  
  1522  	// Test for streaming RPC recv.
  1523  	stream, err = tc.FullDuplexCall(te2.ctx)
  1524  	respParam[0].Size = int32(largeSize)
  1525  	sreq.Payload = smallPayload
  1526  	if err != nil {
  1527  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1528  	}
  1529  	if err = stream.Send(sreq); err != nil {
  1530  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1531  	}
  1532  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1533  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1534  	}
  1535  
  1536  	// Test for streaming RPC send.
  1537  	respParam[0].Size = int32(smallSize)
  1538  	sreq.Payload = largePayload
  1539  	stream, err = tc.FullDuplexCall(te2.ctx)
  1540  	if err != nil {
  1541  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1542  	}
  1543  	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1544  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1545  	}
  1546  
  1547  	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
  1548  	te3 := testServiceConfigSetup(t, e)
  1549  	te3.resolverScheme = r.Scheme()
  1550  	te3.maxClientReceiveMsgSize = newInt(4096)
  1551  	te3.maxClientSendMsgSize = newInt(4096)
  1552  
  1553  	te3.startServer(&testServer{security: e.security})
  1554  	defer te3.tearDown()
  1555  
  1556  	cc3 := te3.clientConn(grpc.WithResolvers(r))
  1557  	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc})
  1558  	tc = testgrpc.NewTestServiceClient(cc3)
  1559  
  1560  	for {
  1561  		if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1562  			break
  1563  		}
  1564  		time.Sleep(time.Millisecond)
  1565  	}
  1566  
  1567  	// Test for unary RPC recv.
  1568  	req.Payload = smallPayload
  1569  	req.ResponseSize = int32(largeSize)
  1570  
  1571  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err != nil {
  1572  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  1573  	}
  1574  
  1575  	req.ResponseSize = int32(extraLargeSize)
  1576  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1577  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1578  	}
  1579  
  1580  	// Test for unary RPC send.
  1581  	req.Payload = largePayload
  1582  	req.ResponseSize = int32(smallSize)
  1583  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  1584  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  1585  	}
  1586  
  1587  	req.Payload = extraLargePayload
  1588  	if _, err = tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1589  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1590  	}
  1591  
  1592  	// Test for streaming RPC recv.
  1593  	stream, err = tc.FullDuplexCall(te3.ctx)
  1594  	if err != nil {
  1595  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1596  	}
  1597  	respParam[0].Size = int32(largeSize)
  1598  	sreq.Payload = smallPayload
  1599  
  1600  	if err = stream.Send(sreq); err != nil {
  1601  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1602  	}
  1603  	if _, err = stream.Recv(); err != nil {
  1604  		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
  1605  	}
  1606  
  1607  	respParam[0].Size = int32(extraLargeSize)
  1608  
  1609  	if err = stream.Send(sreq); err != nil {
  1610  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1611  	}
  1612  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1613  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1614  	}
  1615  
  1616  	// Test for streaming RPC send.
  1617  	respParam[0].Size = int32(smallSize)
  1618  	sreq.Payload = largePayload
  1619  	stream, err = tc.FullDuplexCall(te3.ctx)
  1620  	if err != nil {
  1621  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1622  	}
  1623  	if err := stream.Send(sreq); err != nil {
  1624  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1625  	}
  1626  	sreq.Payload = extraLargePayload
  1627  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1628  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1629  	}
  1630  }
  1631  
// Reading from a streaming RPC may fail with context canceled if timeout was
// set by service config (https://github.com/grpc/grpc-go/issues/1818). This
// test makes sure read from streaming RPC doesn't fail in this case.
func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	te.startServer(&testServer{security: tcpClearRREnv.security})
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	tc := testgrpc.NewTestServiceClient(cc)

	// Push a service config that applies a 10s timeout to FullDuplexCall;
	// the timeout-derived deadline is what used to trigger the spurious
	// "context canceled" error on Recv.
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: te.srvAddr}},
		ServiceConfig: parseServiceConfig(t, r, `{
	    "methodConfig": [
	        {
	            "name": [
	                {
	                    "service": "grpc.testing.TestService",
	                    "method": "FullDuplexCall"
	                }
	            ],
	            "waitForReady": true,
	            "timeout": "10s"
	        }
	    ]
	}`)})
	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
	}

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0)
	if err != nil {
		t.Fatalf("failed to newPayload: %v", err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{{Size: 0}},
		Payload:            payload,
	}
	if err := stream.Send(req); err != nil {
		t.Fatalf("stream.Send(%v) = %v, want <nil>", req, err)
	}
	stream.CloseSend()
	// Sleep 1 second before recv to make sure the final status is received
	// before the recv.
	time.Sleep(time.Second)
	// The first Recv must return the response message, not the deadline
	// error, even though the final status has already arrived.
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("stream.Recv = _, %v, want _, <nil>", err)
	}
	// Keep reading to drain the stream.
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
}
  1702  
  1703  func (s) TestPreloaderClientSend(t *testing.T) {
  1704  	for _, e := range listTestEnv() {
  1705  		testPreloaderClientSend(t, e)
  1706  	}
  1707  }
  1708  
  1709  func testPreloaderClientSend(t *testing.T, e env) {
  1710  	te := newTest(t, e)
  1711  	te.userAgent = testAppUA
  1712  	te.declareLogNoise(
  1713  		"Failed to dial : context canceled; please retry.",
  1714  	)
  1715  	te.startServer(&testServer{security: e.security})
  1716  
  1717  	defer te.tearDown()
  1718  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  1719  
  1720  	// Test for streaming RPC recv.
  1721  	// Set context for send with proper RPC Information
  1722  	stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip"))
  1723  	if err != nil {
  1724  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1725  	}
  1726  	var index int
  1727  	for index < len(reqSizes) {
  1728  		respParam := []*testpb.ResponseParameters{
  1729  			{
  1730  				Size: int32(respSizes[index]),
  1731  			},
  1732  		}
  1733  
  1734  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  1735  		if err != nil {
  1736  			t.Fatal(err)
  1737  		}
  1738  
  1739  		req := &testpb.StreamingOutputCallRequest{
  1740  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1741  			ResponseParameters: respParam,
  1742  			Payload:            payload,
  1743  		}
  1744  		preparedMsg := &grpc.PreparedMsg{}
  1745  		err = preparedMsg.Encode(stream, req)
  1746  		if err != nil {
  1747  			t.Fatalf("PrepareMsg failed for size %d : %v", reqSizes[index], err)
  1748  		}
  1749  		if err := stream.SendMsg(preparedMsg); err != nil {
  1750  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  1751  		}
  1752  		reply, err := stream.Recv()
  1753  		if err != nil {
  1754  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  1755  		}
  1756  		pt := reply.GetPayload().GetType()
  1757  		if pt != testpb.PayloadType_COMPRESSABLE {
  1758  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  1759  		}
  1760  		size := len(reply.GetPayload().GetBody())
  1761  		if size != int(respSizes[index]) {
  1762  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  1763  		}
  1764  		index++
  1765  	}
  1766  	if err := stream.CloseSend(); err != nil {
  1767  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  1768  	}
  1769  	if _, err := stream.Recv(); err != io.EOF {
  1770  		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
  1771  	}
  1772  }
  1773  
  1774  func (s) TestPreloaderSenderSend(t *testing.T) {
  1775  	ss := &stubserver.StubServer{
  1776  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  1777  			for i := 0; i < 10; i++ {
  1778  				preparedMsg := &grpc.PreparedMsg{}
  1779  				err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{
  1780  					Payload: &testpb.Payload{
  1781  						Body: []byte{'0' + uint8(i)},
  1782  					},
  1783  				})
  1784  				if err != nil {
  1785  					return err
  1786  				}
  1787  				stream.SendMsg(preparedMsg)
  1788  			}
  1789  			return nil
  1790  		},
  1791  	}
  1792  	if err := ss.Start(nil); err != nil {
  1793  		t.Fatalf("Error starting endpoint server: %v", err)
  1794  	}
  1795  	defer ss.Stop()
  1796  
  1797  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1798  	defer cancel()
  1799  
  1800  	stream, err := ss.Client.FullDuplexCall(ctx)
  1801  	if err != nil {
  1802  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  1803  	}
  1804  
  1805  	var ngot int
  1806  	var buf bytes.Buffer
  1807  	for {
  1808  		reply, err := stream.Recv()
  1809  		if err == io.EOF {
  1810  			break
  1811  		}
  1812  		if err != nil {
  1813  			t.Fatal(err)
  1814  		}
  1815  		ngot++
  1816  		if buf.Len() > 0 {
  1817  			buf.WriteByte(',')
  1818  		}
  1819  		buf.Write(reply.GetPayload().GetBody())
  1820  	}
  1821  	if want := 10; ngot != want {
  1822  		t.Errorf("Got %d replies, want %d", ngot, want)
  1823  	}
  1824  	if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  1825  		t.Errorf("Got replies %q; want %q", got, want)
  1826  	}
  1827  }
  1828  
  1829  func (s) TestMaxMsgSizeClientDefault(t *testing.T) {
  1830  	for _, e := range listTestEnv() {
  1831  		testMaxMsgSizeClientDefault(t, e)
  1832  	}
  1833  }
  1834  
  1835  func testMaxMsgSizeClientDefault(t *testing.T, e env) {
  1836  	te := newTest(t, e)
  1837  	te.userAgent = testAppUA
  1838  	te.declareLogNoise(
  1839  		"Failed to dial : context canceled; please retry.",
  1840  	)
  1841  	te.startServer(&testServer{security: e.security})
  1842  
  1843  	defer te.tearDown()
  1844  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  1845  
  1846  	const smallSize = 1
  1847  	const largeSize = 4 * 1024 * 1024
  1848  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  1849  	if err != nil {
  1850  		t.Fatal(err)
  1851  	}
  1852  	req := &testpb.SimpleRequest{
  1853  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  1854  		ResponseSize: int32(largeSize),
  1855  		Payload:      smallPayload,
  1856  	}
  1857  
  1858  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1859  	defer cancel()
  1860  	// Test for unary RPC recv.
  1861  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1862  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1863  	}
  1864  
  1865  	respParam := []*testpb.ResponseParameters{
  1866  		{
  1867  			Size: int32(largeSize),
  1868  		},
  1869  	}
  1870  	sreq := &testpb.StreamingOutputCallRequest{
  1871  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1872  		ResponseParameters: respParam,
  1873  		Payload:            smallPayload,
  1874  	}
  1875  
  1876  	// Test for streaming RPC recv.
  1877  	stream, err := tc.FullDuplexCall(te.ctx)
  1878  	if err != nil {
  1879  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1880  	}
  1881  	if err := stream.Send(sreq); err != nil {
  1882  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1883  	}
  1884  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1885  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1886  	}
  1887  }
  1888  
  1889  func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
  1890  	for _, e := range listTestEnv() {
  1891  		testMaxMsgSizeClientAPI(t, e)
  1892  	}
  1893  }
  1894  
// testMaxMsgSizeClientAPI configures 1024-byte client-side send and receive
// limits (with a larger server send limit so failures originate at the
// client) and verifies that over-limit unary and streaming messages fail
// with codes.ResourceExhausted in both directions.
func testMaxMsgSizeClientAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// To avoid error on server side.
	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
	te.maxClientReceiveMsgSize = newInt(1024)
	te.maxClientSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	// A largeSize payload is expected to push the serialized message over
	// the 1024-byte client limits; smallSize stays safely under them.
	const smallSize = 1
	const largeSize = 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC recv: the response exceeds maxClientReceiveMsgSize.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send: the request exceeds maxClientSendMsgSize.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC recv: the streamed response is over the client
	// receive limit; the error is observed via Recv.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send: the streamed request is over the client
	// send limit, so Send itself reports the error.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}
  1975  
  1976  func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
  1977  	for _, e := range listTestEnv() {
  1978  		testMaxMsgSizeServerAPI(t, e)
  1979  	}
  1980  }
  1981  
// testMaxMsgSizeServerAPI configures 1024-byte server-side send and receive
// limits and verifies that over-limit unary and streaming messages fail with
// codes.ResourceExhausted in both directions.
func testMaxMsgSizeServerAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.maxServerReceiveMsgSize = newInt(1024)
	te.maxServerSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	// A largeSize payload is expected to push the serialized message over
	// the 1024-byte server limits; smallSize stays safely under them.
	const smallSize = 1
	const largeSize = 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC send: the response exceeds maxServerSendMsgSize.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC recv: the request exceeds maxServerReceiveMsgSize.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC send: the streamed response is over the server
	// send limit.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv: the streamed request is over the server
	// receive limit; the error is observed via Recv.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  2063  
  2064  func (s) TestTap(t *testing.T) {
  2065  	for _, e := range listTestEnv() {
  2066  		if e.name == "handler-tls" {
  2067  			continue
  2068  		}
  2069  		testTap(t, e)
  2070  	}
  2071  }
  2072  
// myTap is a server tap handle used by testTap; it counts how many times the
// tap observes EmptyCall.
type myTap struct {
	cnt int // number of EmptyCall RPCs seen by the tap
}
  2076  
  2077  func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
  2078  	if info != nil {
  2079  		switch info.FullMethodName {
  2080  		case "/grpc.testing.TestService/EmptyCall":
  2081  			t.cnt++
  2082  
  2083  			if vals := info.Header.Get("return-error"); len(vals) > 0 && vals[0] == "true" {
  2084  				return nil, status.Errorf(codes.Unknown, "tap error")
  2085  			}
  2086  		case "/grpc.testing.TestService/UnaryCall":
  2087  			return nil, fmt.Errorf("tap error")
  2088  		case "/grpc.testing.TestService/FullDuplexCall":
  2089  			return nil, status.Errorf(codes.FailedPrecondition, "test custom error")
  2090  		}
  2091  	}
  2092  	return ctx, nil
  2093  }
  2094  
// testTap installs a myTap handle on the server and verifies its behavior:
// EmptyCall passes the tap and increments its counter; a status error from
// the tap (triggered by the "return-error" header) surfaces as Unknown; a
// plain error from the tap for UnaryCall surfaces as PermissionDenied; and a
// custom status error for FullDuplexCall keeps its FailedPrecondition code.
func testTap(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	ttap := &myTap{}
	te.tapHandle = ttap.handle
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// A plain EmptyCall succeeds and bumps the tap counter.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	if ttap.cnt != 1 {
		t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt)
	}

	// "return-error: false" does not trigger the tap error; the call still
	// succeeds and is counted.
	if _, err := tc.EmptyCall(metadata.AppendToOutgoingContext(ctx, "return-error", "false"), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	if ttap.cnt != 2 {
		t.Fatalf("Get the count in ttap %d, want 2", ttap.cnt)
	}

	// "return-error: true" makes the tap fail the RPC with codes.Unknown;
	// the counter is still incremented before the error is returned.
	if _, err := tc.EmptyCall(metadata.AppendToOutgoingContext(ctx, "return-error", "true"), &testpb.Empty{}); status.Code(err) != codes.Unknown {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unknown)
	}
	if ttap.cnt != 3 {
		t.Fatalf("Get the count in ttap %d, want 3", ttap.cnt)
	}

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
	if err != nil {
		t.Fatal(err)
	}

	// The tap returns a plain (non-status) error for UnaryCall; the client
	// observes PermissionDenied.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 45,
		Payload:      payload,
	}
	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.PermissionDenied {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied)
	}
	// The tap returns a FailedPrecondition status for FullDuplexCall; the
	// custom code is preserved and observed on the first Recv.
	str, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("Unexpected error creating stream: %v", err)
	}
	if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition {
		t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition)
	}
}
  2150  
  2151  func (s) TestEmptyUnaryWithUserAgent(t *testing.T) {
  2152  	for _, e := range listTestEnv() {
  2153  		testEmptyUnaryWithUserAgent(t, e)
  2154  	}
  2155  }
  2156  
  2157  func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
  2158  	te := newTest(t, e)
  2159  	te.userAgent = testAppUA
  2160  	te.startServer(&testServer{security: e.security})
  2161  	defer te.tearDown()
  2162  
  2163  	cc := te.clientConn()
  2164  	tc := testgrpc.NewTestServiceClient(cc)
  2165  	var header metadata.MD
  2166  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2167  	defer cancel()
  2168  	reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header))
  2169  	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
  2170  		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
  2171  	}
  2172  	if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
  2173  		t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
  2174  	}
  2175  
  2176  	te.srv.Stop()
  2177  }
  2178  
  2179  func (s) TestFailedEmptyUnary(t *testing.T) {
  2180  	for _, e := range listTestEnv() {
  2181  		if e.name == "handler-tls" {
  2182  			// This test covers status details, but
  2183  			// Grpc-Status-Details-Bin is not support in handler_server.
  2184  			continue
  2185  		}
  2186  		testFailedEmptyUnary(t, e)
  2187  	}
  2188  }
  2189  
  2190  func testFailedEmptyUnary(t *testing.T, e env) {
  2191  	te := newTest(t, e)
  2192  	te.userAgent = failAppUA
  2193  	te.startServer(&testServer{security: e.security})
  2194  	defer te.tearDown()
  2195  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2196  
  2197  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2198  	defer cancel()
  2199  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2200  	wantErr := detailedError
  2201  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
  2202  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
  2203  	}
  2204  }
  2205  
  2206  func (s) TestLargeUnary(t *testing.T) {
  2207  	for _, e := range listTestEnv() {
  2208  		testLargeUnary(t, e)
  2209  	}
  2210  }
  2211  
  2212  func testLargeUnary(t *testing.T, e env) {
  2213  	te := newTest(t, e)
  2214  	te.startServer(&testServer{security: e.security})
  2215  	defer te.tearDown()
  2216  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2217  
  2218  	const argSize = 271828
  2219  	const respSize = 314159
  2220  
  2221  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2222  	if err != nil {
  2223  		t.Fatal(err)
  2224  	}
  2225  
  2226  	req := &testpb.SimpleRequest{
  2227  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2228  		ResponseSize: respSize,
  2229  		Payload:      payload,
  2230  	}
  2231  
  2232  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2233  	defer cancel()
  2234  	reply, err := tc.UnaryCall(ctx, req)
  2235  	if err != nil {
  2236  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  2237  	}
  2238  	pt := reply.GetPayload().GetType()
  2239  	ps := len(reply.GetPayload().GetBody())
  2240  	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
  2241  		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
  2242  	}
  2243  }
  2244  
  2245  // Test backward-compatibility API for setting msg size limit.
  2246  func (s) TestExceedMsgLimit(t *testing.T) {
  2247  	for _, e := range listTestEnv() {
  2248  		testExceedMsgLimit(t, e)
  2249  	}
  2250  }
  2251  
// testExceedMsgLimit exercises the legacy maxServerMsgSize/maxClientMsgSize
// knobs: messages one byte over the 1024-byte limit are rejected with
// codes.ResourceExhausted on both the server and the client, for unary and
// streaming RPCs alike.
func testExceedMsgLimit(t *testing.T, e env) {
	te := newTest(t, e)
	maxMsgSize := 1024
	te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	// largeSize is one byte over the configured limit.
	largeSize := int32(maxMsgSize + 1)
	const smallSize = 1

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure the server cannot receive a unary RPC of largeSize.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: smallSize,
		Payload:      largePayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
	// Make sure the client cannot receive a unary RPC of largeSize.
	req.ResponseSize = largeSize
	req.Payload = smallPayload
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Make sure the server cannot receive a streaming RPC of largeSize.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}

	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            largePayload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test on client side for streaming RPC: the server's response of
	// largeSize must be rejected by the client, observed via Recv.
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam[0].Size = largeSize
	sreq.Payload = smallPayload
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  2328  
  2329  func (s) TestPeerClientSide(t *testing.T) {
  2330  	for _, e := range listTestEnv() {
  2331  		testPeerClientSide(t, e)
  2332  	}
  2333  }
  2334  
  2335  func testPeerClientSide(t *testing.T, e env) {
  2336  	te := newTest(t, e)
  2337  	te.userAgent = testAppUA
  2338  	te.startServer(&testServer{security: e.security})
  2339  	defer te.tearDown()
  2340  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2341  	peer := new(peer.Peer)
  2342  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2343  	defer cancel()
  2344  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil {
  2345  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  2346  	}
  2347  	pa := peer.Addr.String()
  2348  	if e.network == "unix" {
  2349  		if pa != te.srvAddr {
  2350  			t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
  2351  		}
  2352  		return
  2353  	}
  2354  	_, pp, err := net.SplitHostPort(pa)
  2355  	if err != nil {
  2356  		t.Fatalf("Failed to parse address from peer.")
  2357  	}
  2358  	_, sp, err := net.SplitHostPort(te.srvAddr)
  2359  	if err != nil {
  2360  		t.Fatalf("Failed to parse address of test server.")
  2361  	}
  2362  	if pp != sp {
  2363  		t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
  2364  	}
  2365  }
  2366  
  2367  // TestPeerNegative tests that if call fails setting peer
  2368  // doesn't cause a segmentation fault.
  2369  // issue#1141 https://github.com/grpc/grpc-go/issues/1141
  2370  func (s) TestPeerNegative(t *testing.T) {
  2371  	for _, e := range listTestEnv() {
  2372  		testPeerNegative(t, e)
  2373  	}
  2374  }
  2375  
  2376  func testPeerNegative(t *testing.T, e env) {
  2377  	te := newTest(t, e)
  2378  	te.startServer(&testServer{security: e.security})
  2379  	defer te.tearDown()
  2380  
  2381  	cc := te.clientConn()
  2382  	tc := testgrpc.NewTestServiceClient(cc)
  2383  	peer := new(peer.Peer)
  2384  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2385  	cancel()
  2386  	tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer))
  2387  }
  2388  
  2389  func (s) TestPeerFailedRPC(t *testing.T) {
  2390  	for _, e := range listTestEnv() {
  2391  		testPeerFailedRPC(t, e)
  2392  	}
  2393  }
  2394  
// testPeerFailedRPC verifies that the peer is populated via grpc.Peer even
// when the RPC itself fails: the server's receive limit is set to 1KB and a
// 5KB request is sent, so the call fails with ResourceExhausted, yet the
// recorded peer address must still match the test server's address.
func testPeerFailedRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxServerReceiveMsgSize = newInt(1 * 1024)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// first make a successful request to the server
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}

	// make a second request that will be rejected by the server
	// (5KB payload exceeds the 1KB maxServerReceiveMsgSize above)
	const largeSize = 5 * 1024
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		Payload:      largePayload,
	}

	peer := new(peer.Peer)
	if _, err := tc.UnaryCall(ctx, req, grpc.Peer(peer)); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	} else {
		pa := peer.Addr.String()
		if e.network == "unix" {
			// Unix socket addresses have no host:port structure; compare the
			// whole address string directly.
			if pa != te.srvAddr {
				t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
			}
			return
		}
		// For TCP, compare only the port halves: the host may be spelled
		// differently on the two sides (e.g. 127.0.0.1 vs localhost).
		_, pp, err := net.SplitHostPort(pa)
		if err != nil {
			t.Fatalf("Failed to parse address from peer.")
		}
		_, sp, err := net.SplitHostPort(te.srvAddr)
		if err != nil {
			t.Fatalf("Failed to parse address of test server.")
		}
		if pp != sp {
			t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
		}
	}
}
  2445  
  2446  func (s) TestMetadataUnaryRPC(t *testing.T) {
  2447  	for _, e := range listTestEnv() {
  2448  		testMetadataUnaryRPC(t, e)
  2449  	}
  2450  }
  2451  
  2452  func testMetadataUnaryRPC(t *testing.T, e env) {
  2453  	te := newTest(t, e)
  2454  	te.startServer(&testServer{security: e.security})
  2455  	defer te.tearDown()
  2456  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2457  
  2458  	const argSize = 2718
  2459  	const respSize = 314
  2460  
  2461  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2462  	if err != nil {
  2463  		t.Fatal(err)
  2464  	}
  2465  
  2466  	req := &testpb.SimpleRequest{
  2467  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2468  		ResponseSize: respSize,
  2469  		Payload:      payload,
  2470  	}
  2471  	var header, trailer metadata.MD
  2472  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2473  	defer cancel()
  2474  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2475  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
  2476  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2477  	}
  2478  	// Ignore optional response headers that Servers may set:
  2479  	if header != nil {
  2480  		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
  2481  		delete(header, "date")    // the Date header is also optional
  2482  		delete(header, "user-agent")
  2483  		delete(header, "content-type")
  2484  		delete(header, "grpc-accept-encoding")
  2485  	}
  2486  	if !reflect.DeepEqual(header, testMetadata) {
  2487  		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
  2488  	}
  2489  	if !reflect.DeepEqual(trailer, testTrailerMetadata) {
  2490  		t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
  2491  	}
  2492  }
  2493  
  2494  func (s) TestMetadataOrderUnaryRPC(t *testing.T) {
  2495  	for _, e := range listTestEnv() {
  2496  		testMetadataOrderUnaryRPC(t, e)
  2497  	}
  2498  }
  2499  
  2500  func testMetadataOrderUnaryRPC(t *testing.T, e env) {
  2501  	te := newTest(t, e)
  2502  	te.startServer(&testServer{security: e.security})
  2503  	defer te.tearDown()
  2504  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2505  
  2506  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2507  	defer cancel()
  2508  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2509  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
  2510  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")
  2511  
  2512  	// using Join to built expected metadata instead of FromOutgoingContext
  2513  	newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3"))
  2514  
  2515  	var header metadata.MD
  2516  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil {
  2517  		t.Fatal(err)
  2518  	}
  2519  
  2520  	// Ignore optional response headers that Servers may set:
  2521  	if header != nil {
  2522  		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
  2523  		delete(header, "date")    // the Date header is also optional
  2524  		delete(header, "user-agent")
  2525  		delete(header, "content-type")
  2526  		delete(header, "grpc-accept-encoding")
  2527  	}
  2528  
  2529  	if !reflect.DeepEqual(header, newMetadata) {
  2530  		t.Fatalf("Received header metadata %v, want %v", header, newMetadata)
  2531  	}
  2532  }
  2533  
  2534  func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) {
  2535  	for _, e := range listTestEnv() {
  2536  		testMultipleSetTrailerUnaryRPC(t, e)
  2537  	}
  2538  }
  2539  
  2540  func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
  2541  	te := newTest(t, e)
  2542  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  2543  	defer te.tearDown()
  2544  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2545  
  2546  	const (
  2547  		argSize  = 1
  2548  		respSize = 1
  2549  	)
  2550  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2551  	if err != nil {
  2552  		t.Fatal(err)
  2553  	}
  2554  
  2555  	req := &testpb.SimpleRequest{
  2556  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2557  		ResponseSize: respSize,
  2558  		Payload:      payload,
  2559  	}
  2560  	var trailer metadata.MD
  2561  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2562  	defer cancel()
  2563  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2564  	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
  2565  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2566  	}
  2567  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  2568  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  2569  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  2570  	}
  2571  }
  2572  
  2573  func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) {
  2574  	for _, e := range listTestEnv() {
  2575  		testMultipleSetTrailerStreamingRPC(t, e)
  2576  	}
  2577  }
  2578  
  2579  func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
  2580  	te := newTest(t, e)
  2581  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  2582  	defer te.tearDown()
  2583  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2584  
  2585  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2586  	defer cancel()
  2587  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2588  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  2589  	if err != nil {
  2590  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2591  	}
  2592  	if err := stream.CloseSend(); err != nil {
  2593  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2594  	}
  2595  	if _, err := stream.Recv(); err != io.EOF {
  2596  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  2597  	}
  2598  
  2599  	trailer := stream.Trailer()
  2600  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  2601  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  2602  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  2603  	}
  2604  }
  2605  
  2606  func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) {
  2607  	for _, e := range listTestEnv() {
  2608  		if e.name == "handler-tls" {
  2609  			continue
  2610  		}
  2611  		testSetAndSendHeaderUnaryRPC(t, e)
  2612  	}
  2613  }
  2614  
  2615  // To test header metadata is sent on SendHeader().
  2616  func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
  2617  	te := newTest(t, e)
  2618  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  2619  	defer te.tearDown()
  2620  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2621  
  2622  	const (
  2623  		argSize  = 1
  2624  		respSize = 1
  2625  	)
  2626  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2627  	if err != nil {
  2628  		t.Fatal(err)
  2629  	}
  2630  
  2631  	req := &testpb.SimpleRequest{
  2632  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2633  		ResponseSize: respSize,
  2634  		Payload:      payload,
  2635  	}
  2636  	var header metadata.MD
  2637  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2638  	defer cancel()
  2639  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2640  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  2641  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2642  	}
  2643  	delete(header, "user-agent")
  2644  	delete(header, "content-type")
  2645  	delete(header, "grpc-accept-encoding")
  2646  
  2647  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2648  	if !reflect.DeepEqual(header, expectedHeader) {
  2649  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2650  	}
  2651  }
  2652  
  2653  func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) {
  2654  	for _, e := range listTestEnv() {
  2655  		if e.name == "handler-tls" {
  2656  			continue
  2657  		}
  2658  		testMultipleSetHeaderUnaryRPC(t, e)
  2659  	}
  2660  }
  2661  
  2662  // To test header metadata is sent when sending response.
  2663  func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
  2664  	te := newTest(t, e)
  2665  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2666  	defer te.tearDown()
  2667  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2668  
  2669  	const (
  2670  		argSize  = 1
  2671  		respSize = 1
  2672  	)
  2673  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2674  	if err != nil {
  2675  		t.Fatal(err)
  2676  	}
  2677  
  2678  	req := &testpb.SimpleRequest{
  2679  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2680  		ResponseSize: respSize,
  2681  		Payload:      payload,
  2682  	}
  2683  
  2684  	var header metadata.MD
  2685  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2686  	defer cancel()
  2687  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2688  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  2689  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2690  	}
  2691  	delete(header, "user-agent")
  2692  	delete(header, "content-type")
  2693  	delete(header, "grpc-accept-encoding")
  2694  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2695  	if !reflect.DeepEqual(header, expectedHeader) {
  2696  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2697  	}
  2698  }
  2699  
  2700  func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) {
  2701  	for _, e := range listTestEnv() {
  2702  		if e.name == "handler-tls" {
  2703  			continue
  2704  		}
  2705  		testMultipleSetHeaderUnaryRPCError(t, e)
  2706  	}
  2707  }
  2708  
  2709  // To test header metadata is sent when sending status.
  2710  func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
  2711  	te := newTest(t, e)
  2712  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2713  	defer te.tearDown()
  2714  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2715  
  2716  	const (
  2717  		argSize  = 1
  2718  		respSize = -1 // Invalid respSize to make RPC fail.
  2719  	)
  2720  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2721  	if err != nil {
  2722  		t.Fatal(err)
  2723  	}
  2724  
  2725  	req := &testpb.SimpleRequest{
  2726  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2727  		ResponseSize: respSize,
  2728  		Payload:      payload,
  2729  	}
  2730  	var header metadata.MD
  2731  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2732  	defer cancel()
  2733  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2734  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
  2735  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
  2736  	}
  2737  	delete(header, "user-agent")
  2738  	delete(header, "content-type")
  2739  	delete(header, "grpc-accept-encoding")
  2740  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2741  	if !reflect.DeepEqual(header, expectedHeader) {
  2742  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2743  	}
  2744  }
  2745  
  2746  func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) {
  2747  	for _, e := range listTestEnv() {
  2748  		if e.name == "handler-tls" {
  2749  			continue
  2750  		}
  2751  		testSetAndSendHeaderStreamingRPC(t, e)
  2752  	}
  2753  }
  2754  
  2755  // To test header metadata is sent on SendHeader().
  2756  func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
  2757  	te := newTest(t, e)
  2758  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  2759  	defer te.tearDown()
  2760  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2761  
  2762  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2763  	defer cancel()
  2764  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2765  	stream, err := tc.FullDuplexCall(ctx)
  2766  	if err != nil {
  2767  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2768  	}
  2769  	if err := stream.CloseSend(); err != nil {
  2770  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2771  	}
  2772  	if _, err := stream.Recv(); err != io.EOF {
  2773  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  2774  	}
  2775  
  2776  	header, err := stream.Header()
  2777  	if err != nil {
  2778  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  2779  	}
  2780  	delete(header, "user-agent")
  2781  	delete(header, "content-type")
  2782  	delete(header, "grpc-accept-encoding")
  2783  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2784  	if !reflect.DeepEqual(header, expectedHeader) {
  2785  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2786  	}
  2787  }
  2788  
  2789  func (s) TestMultipleSetHeaderStreamingRPC(t *testing.T) {
  2790  	for _, e := range listTestEnv() {
  2791  		if e.name == "handler-tls" {
  2792  			continue
  2793  		}
  2794  		testMultipleSetHeaderStreamingRPC(t, e)
  2795  	}
  2796  }
  2797  
  2798  // To test header metadata is sent when sending response.
  2799  func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
  2800  	te := newTest(t, e)
  2801  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2802  	defer te.tearDown()
  2803  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2804  
  2805  	const (
  2806  		argSize  = 1
  2807  		respSize = 1
  2808  	)
  2809  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2810  	defer cancel()
  2811  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2812  	stream, err := tc.FullDuplexCall(ctx)
  2813  	if err != nil {
  2814  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2815  	}
  2816  
  2817  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2818  	if err != nil {
  2819  		t.Fatal(err)
  2820  	}
  2821  
  2822  	req := &testpb.StreamingOutputCallRequest{
  2823  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2824  		ResponseParameters: []*testpb.ResponseParameters{
  2825  			{Size: respSize},
  2826  		},
  2827  		Payload: payload,
  2828  	}
  2829  	if err := stream.Send(req); err != nil {
  2830  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  2831  	}
  2832  	if _, err := stream.Recv(); err != nil {
  2833  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  2834  	}
  2835  	if err := stream.CloseSend(); err != nil {
  2836  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2837  	}
  2838  	if _, err := stream.Recv(); err != io.EOF {
  2839  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  2840  	}
  2841  
  2842  	header, err := stream.Header()
  2843  	if err != nil {
  2844  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  2845  	}
  2846  	delete(header, "user-agent")
  2847  	delete(header, "content-type")
  2848  	delete(header, "grpc-accept-encoding")
  2849  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2850  	if !reflect.DeepEqual(header, expectedHeader) {
  2851  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2852  	}
  2853  
  2854  }
  2855  
  2856  func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) {
  2857  	for _, e := range listTestEnv() {
  2858  		if e.name == "handler-tls" {
  2859  			continue
  2860  		}
  2861  		testMultipleSetHeaderStreamingRPCError(t, e)
  2862  	}
  2863  }
  2864  
  2865  // To test header metadata is sent when sending status.
  2866  func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
  2867  	te := newTest(t, e)
  2868  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2869  	defer te.tearDown()
  2870  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2871  
  2872  	const (
  2873  		argSize  = 1
  2874  		respSize = -1
  2875  	)
  2876  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2877  	defer cancel()
  2878  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2879  	stream, err := tc.FullDuplexCall(ctx)
  2880  	if err != nil {
  2881  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2882  	}
  2883  
  2884  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2885  	if err != nil {
  2886  		t.Fatal(err)
  2887  	}
  2888  
  2889  	req := &testpb.StreamingOutputCallRequest{
  2890  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2891  		ResponseParameters: []*testpb.ResponseParameters{
  2892  			{Size: respSize},
  2893  		},
  2894  		Payload: payload,
  2895  	}
  2896  	if err := stream.Send(req); err != nil {
  2897  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  2898  	}
  2899  	if _, err := stream.Recv(); err == nil {
  2900  		t.Fatalf("%v.Recv() = %v, want <non-nil>", stream, err)
  2901  	}
  2902  
  2903  	header, err := stream.Header()
  2904  	if err != nil {
  2905  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  2906  	}
  2907  	delete(header, "user-agent")
  2908  	delete(header, "content-type")
  2909  	delete(header, "grpc-accept-encoding")
  2910  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2911  	if !reflect.DeepEqual(header, expectedHeader) {
  2912  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2913  	}
  2914  	if err := stream.CloseSend(); err != nil {
  2915  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2916  	}
  2917  }
  2918  
// TestMalformedHTTP2Metadata verifies the returned error when the client
// sends illegal metadata.
func (s) TestMalformedHTTP2Metadata(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// Failed with "server stops accepting new RPCs".
			// Server stops accepting new RPCs when the client sends an illegal http2 header.
			continue
		}
		testMalformedHTTP2Metadata(t, e)
	}
}
  2931  
  2932  func testMalformedHTTP2Metadata(t *testing.T, e env) {
  2933  	te := newTest(t, e)
  2934  	te.startServer(&testServer{security: e.security})
  2935  	defer te.tearDown()
  2936  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2937  
  2938  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
  2939  	if err != nil {
  2940  		t.Fatal(err)
  2941  	}
  2942  
  2943  	req := &testpb.SimpleRequest{
  2944  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2945  		ResponseSize: 314,
  2946  		Payload:      payload,
  2947  	}
  2948  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2949  	defer cancel()
  2950  	ctx = metadata.NewOutgoingContext(ctx, malformedHTTP2Metadata)
  2951  	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
  2952  		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
  2953  	}
  2954  }
  2955  
  2956  // Tests that the client transparently retries correctly when receiving a
  2957  // RST_STREAM with code REFUSED_STREAM.
  2958  func (s) TestTransparentRetry(t *testing.T) {
  2959  	testCases := []struct {
  2960  		failFast bool
  2961  		errCode  codes.Code
  2962  	}{{
  2963  		// success attempt: 1, (stream ID 1)
  2964  	}, {
  2965  		// success attempt: 2, (stream IDs 3, 5)
  2966  	}, {
  2967  		// no success attempt (stream IDs 7, 9)
  2968  		errCode: codes.Unavailable,
  2969  	}, {
  2970  		// success attempt: 1 (stream ID 11),
  2971  		failFast: true,
  2972  	}, {
  2973  		// success attempt: 2 (stream IDs 13, 15),
  2974  		failFast: true,
  2975  	}, {
  2976  		// no success attempt (stream IDs 17, 19)
  2977  		failFast: true,
  2978  		errCode:  codes.Unavailable,
  2979  	}}
  2980  
  2981  	lis, err := net.Listen("tcp", "localhost:0")
  2982  	if err != nil {
  2983  		t.Fatalf("Failed to listen. Err: %v", err)
  2984  	}
  2985  	defer lis.Close()
  2986  	server := &httpServer{
  2987  		responses: []httpServerResponse{{
  2988  			trailers: [][]string{{
  2989  				":status", "200",
  2990  				"content-type", "application/grpc",
  2991  				"grpc-status", "0",
  2992  			}},
  2993  		}},
  2994  		refuseStream: func(i uint32) bool {
  2995  			switch i {
  2996  			case 1, 5, 11, 15: // these stream IDs succeed
  2997  				return false
  2998  			}
  2999  			return true // these are refused
  3000  		},
  3001  	}
  3002  	server.start(t, lis)
  3003  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
  3004  	if err != nil {
  3005  		t.Fatalf("failed to dial due to err: %v", err)
  3006  	}
  3007  	defer cc.Close()
  3008  
  3009  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3010  	defer cancel()
  3011  
  3012  	client := testgrpc.NewTestServiceClient(cc)
  3013  
  3014  	for i, tc := range testCases {
  3015  		stream, err := client.FullDuplexCall(ctx)
  3016  		if err != nil {
  3017  			t.Fatalf("error creating stream due to err: %v", err)
  3018  		}
  3019  		code := func(err error) codes.Code {
  3020  			if err == io.EOF {
  3021  				return codes.OK
  3022  			}
  3023  			return status.Code(err)
  3024  		}
  3025  		if _, err := stream.Recv(); code(err) != tc.errCode {
  3026  			t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode)
  3027  		}
  3028  
  3029  	}
  3030  }
  3031  
  3032  func (s) TestCancel(t *testing.T) {
  3033  	for _, e := range listTestEnv() {
  3034  		t.Run(e.name, func(t *testing.T) {
  3035  			testCancel(t, e)
  3036  		})
  3037  	}
  3038  }
  3039  
  3040  func testCancel(t *testing.T, e env) {
  3041  	te := newTest(t, e)
  3042  	te.declareLogNoise("grpc: the client connection is closing; please retry")
  3043  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second})
  3044  	defer te.tearDown()
  3045  
  3046  	cc := te.clientConn()
  3047  	tc := testgrpc.NewTestServiceClient(cc)
  3048  
  3049  	const argSize = 2718
  3050  	const respSize = 314
  3051  
  3052  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3053  	if err != nil {
  3054  		t.Fatal(err)
  3055  	}
  3056  
  3057  	req := &testpb.SimpleRequest{
  3058  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3059  		ResponseSize: respSize,
  3060  		Payload:      payload,
  3061  	}
  3062  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3063  	time.AfterFunc(1*time.Millisecond, cancel)
  3064  	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
  3065  		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
  3066  	}
  3067  	awaitNewConnLogOutput()
  3068  }
  3069  
  3070  func (s) TestCancelNoIO(t *testing.T) {
  3071  	for _, e := range listTestEnv() {
  3072  		testCancelNoIO(t, e)
  3073  	}
  3074  }
  3075  
// testCancelNoIO verifies that a stream blocked on the server's
// MaxConcurrentStreams limit (set to 1) is admitted once the earlier,
// limit-holding stream is cancelled — with no I/O ever performed on the
// cancelled stream.
func testCancelNoIO(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken")
	te.maxStream = 1 // Only allows 1 live stream per server transport.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)

	// Start one blocked RPC for which we'll never send streaming
	// input. This will consume the 1 maximum concurrent streams,
	// causing future RPCs to hang.
	ctx, cancelFirst := context.WithTimeout(context.Background(), defaultTestTimeout)
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}

	// Loop until the ClientConn receives the initial settings
	// frame from the server, notifying it about the maximum
	// concurrent streams. We know when it's received it because
	// an RPC will fail with codes.DeadlineExceeded instead of
	// succeeding.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	for {
		ctx, cancelSecond := context.WithTimeout(context.Background(), defaultTestShortTimeout)
		_, err := tc.StreamingInputCall(ctx)
		cancelSecond()
		if err == nil {
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
	}
	// If there are any RPCs in flight before the client receives
	// the max streams setting, let them be expired.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	time.Sleep(50 * time.Millisecond)

	// Release the concurrent-stream slot asynchronously, while the RPC
	// below is (expected to be) blocked waiting for it.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancelFirst()
	}()

	// This should be blocked until the 1st is canceled, then succeed.
	ctx, cancelThird := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	cancelThird()
}
  3130  
// The following tests the gRPC streaming RPC implementations.
// TODO(zhaoq): Have better coverage on error cases.
var (
	// Request and response payload sizes exercised by the streaming tests
	// below, mixing large and small messages.
	reqSizes  = []int{27182, 8, 1828, 45904}
	respSizes = []int{31415, 9, 2653, 58979}
)
  3137  
  3138  func (s) TestNoService(t *testing.T) {
  3139  	for _, e := range listTestEnv() {
  3140  		testNoService(t, e)
  3141  	}
  3142  }
  3143  
  3144  func testNoService(t *testing.T, e env) {
  3145  	te := newTest(t, e)
  3146  	te.startServer(nil)
  3147  	defer te.tearDown()
  3148  
  3149  	cc := te.clientConn()
  3150  	tc := testgrpc.NewTestServiceClient(cc)
  3151  
  3152  	stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true))
  3153  	if err != nil {
  3154  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3155  	}
  3156  	if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented {
  3157  		t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented)
  3158  	}
  3159  }
  3160  
  3161  func (s) TestPingPong(t *testing.T) {
  3162  	for _, e := range listTestEnv() {
  3163  		testPingPong(t, e)
  3164  	}
  3165  }
  3166  
  3167  func testPingPong(t *testing.T, e env) {
  3168  	te := newTest(t, e)
  3169  	te.startServer(&testServer{security: e.security})
  3170  	defer te.tearDown()
  3171  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3172  
  3173  	stream, err := tc.FullDuplexCall(te.ctx)
  3174  	if err != nil {
  3175  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3176  	}
  3177  	var index int
  3178  	for index < len(reqSizes) {
  3179  		respParam := []*testpb.ResponseParameters{
  3180  			{
  3181  				Size: int32(respSizes[index]),
  3182  			},
  3183  		}
  3184  
  3185  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  3186  		if err != nil {
  3187  			t.Fatal(err)
  3188  		}
  3189  
  3190  		req := &testpb.StreamingOutputCallRequest{
  3191  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3192  			ResponseParameters: respParam,
  3193  			Payload:            payload,
  3194  		}
  3195  		if err := stream.Send(req); err != nil {
  3196  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3197  		}
  3198  		reply, err := stream.Recv()
  3199  		if err != nil {
  3200  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  3201  		}
  3202  		pt := reply.GetPayload().GetType()
  3203  		if pt != testpb.PayloadType_COMPRESSABLE {
  3204  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  3205  		}
  3206  		size := len(reply.GetPayload().GetBody())
  3207  		if size != int(respSizes[index]) {
  3208  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  3209  		}
  3210  		index++
  3211  	}
  3212  	if err := stream.CloseSend(); err != nil {
  3213  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3214  	}
  3215  	if _, err := stream.Recv(); err != io.EOF {
  3216  		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
  3217  	}
  3218  }
  3219  
  3220  func (s) TestMetadataStreamingRPC(t *testing.T) {
  3221  	for _, e := range listTestEnv() {
  3222  		testMetadataStreamingRPC(t, e)
  3223  	}
  3224  }
  3225  
// testMetadataStreamingRPC attaches metadata to a full-duplex stream and
// verifies — concurrently with sending the requests — that the server echoes
// it in the stream's header (including the cached Header() value) and in the
// trailer after the stream completes.
func testMetadataStreamingRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// Check headers and send all requests from a separate goroutine while
	// the main goroutine drains the responses below.
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		delete(headerMD, "grpc-accept-encoding")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// test the cached value.
		headerMD, err = stream.Header()
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		delete(headerMD, "grpc-accept-encoding")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Send one request per size pair; any failure is reported after
		// CloseSend so the server is always told we are done.
		err = func() error {
			for index := 0; index < len(reqSizes); index++ {
				respParam := []*testpb.ResponseParameters{
					{
						Size: int32(respSizes[index]),
					},
				}

				payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
				if err != nil {
					return err
				}

				req := &testpb.StreamingOutputCallRequest{
					ResponseType:       testpb.PayloadType_COMPRESSABLE,
					ResponseParameters: respParam,
					Payload:            payload,
				}
				if err := stream.Send(req); err != nil {
					return fmt.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				}
			}
			return nil
		}()
		// Tell the server we're done sending args.
		stream.CloseSend()
		if err != nil {
			t.Error(err)
		}
	}()
	// Drain every response; the loop exits on io.EOF (or any error).
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testTrailerMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata)
	}
}
  3298  
  3299  func (s) TestServerStreaming(t *testing.T) {
  3300  	for _, e := range listTestEnv() {
  3301  		testServerStreaming(t, e)
  3302  	}
  3303  }
  3304  
  3305  func testServerStreaming(t *testing.T, e env) {
  3306  	te := newTest(t, e)
  3307  	te.startServer(&testServer{security: e.security})
  3308  	defer te.tearDown()
  3309  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3310  
  3311  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  3312  	for i, s := range respSizes {
  3313  		respParam[i] = &testpb.ResponseParameters{
  3314  			Size: int32(s),
  3315  		}
  3316  	}
  3317  	req := &testpb.StreamingOutputCallRequest{
  3318  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3319  		ResponseParameters: respParam,
  3320  	}
  3321  
  3322  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3323  	defer cancel()
  3324  	stream, err := tc.StreamingOutputCall(ctx, req)
  3325  	if err != nil {
  3326  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  3327  	}
  3328  	var rpcStatus error
  3329  	var respCnt int
  3330  	var index int
  3331  	for {
  3332  		reply, err := stream.Recv()
  3333  		if err != nil {
  3334  			rpcStatus = err
  3335  			break
  3336  		}
  3337  		pt := reply.GetPayload().GetType()
  3338  		if pt != testpb.PayloadType_COMPRESSABLE {
  3339  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  3340  		}
  3341  		size := len(reply.GetPayload().GetBody())
  3342  		if size != int(respSizes[index]) {
  3343  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  3344  		}
  3345  		index++
  3346  		respCnt++
  3347  	}
  3348  	if rpcStatus != io.EOF {
  3349  		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
  3350  	}
  3351  	if respCnt != len(respSizes) {
  3352  		t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
  3353  	}
  3354  }
  3355  
  3356  func (s) TestFailedServerStreaming(t *testing.T) {
  3357  	for _, e := range listTestEnv() {
  3358  		testFailedServerStreaming(t, e)
  3359  	}
  3360  }
  3361  
  3362  func testFailedServerStreaming(t *testing.T, e env) {
  3363  	te := newTest(t, e)
  3364  	te.userAgent = failAppUA
  3365  	te.startServer(&testServer{security: e.security})
  3366  	defer te.tearDown()
  3367  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3368  
  3369  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  3370  	for i, s := range respSizes {
  3371  		respParam[i] = &testpb.ResponseParameters{
  3372  			Size: int32(s),
  3373  		}
  3374  	}
  3375  	req := &testpb.StreamingOutputCallRequest{
  3376  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3377  		ResponseParameters: respParam,
  3378  	}
  3379  	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
  3380  	stream, err := tc.StreamingOutputCall(ctx, req)
  3381  	if err != nil {
  3382  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  3383  	}
  3384  	wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA)
  3385  	if _, err := stream.Recv(); !equalError(err, wantErr) {
  3386  		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr)
  3387  	}
  3388  }
  3389  
  3390  func equalError(x, y error) bool {
  3391  	return x == y || (x != nil && y != nil && x.Error() == y.Error())
  3392  }
  3393  
// concurrentSendServer is a TestServiceServer whose
// StreamingOutputCall makes ten serial Send calls, sending payloads
// "0".."9", inclusive.  TestServerStreamingConcurrent verifies they
// were received in the correct order, and that there were no races.
//
// All other TestServiceServer methods crash if called.
type concurrentSendServer struct {
	// Embedded so the type satisfies the service interface; unimplemented
	// methods fall through to the embedded implementation.
	testgrpc.TestServiceServer
}
  3403  
  3404  func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error {
  3405  	for i := 0; i < 10; i++ {
  3406  		stream.Send(&testpb.StreamingOutputCallResponse{
  3407  			Payload: &testpb.Payload{
  3408  				Body: []byte{'0' + uint8(i)},
  3409  			},
  3410  		})
  3411  	}
  3412  	return nil
  3413  }
  3414  
  3415  // Tests doing a bunch of concurrent streaming output calls.
  3416  func (s) TestServerStreamingConcurrent(t *testing.T) {
  3417  	for _, e := range listTestEnv() {
  3418  		testServerStreamingConcurrent(t, e)
  3419  	}
  3420  }
  3421  
  3422  func testServerStreamingConcurrent(t *testing.T, e env) {
  3423  	te := newTest(t, e)
  3424  	te.startServer(concurrentSendServer{})
  3425  	defer te.tearDown()
  3426  
  3427  	cc := te.clientConn()
  3428  	tc := testgrpc.NewTestServiceClient(cc)
  3429  
  3430  	doStreamingCall := func() {
  3431  		req := &testpb.StreamingOutputCallRequest{}
  3432  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3433  		defer cancel()
  3434  		stream, err := tc.StreamingOutputCall(ctx, req)
  3435  		if err != nil {
  3436  			t.Errorf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  3437  			return
  3438  		}
  3439  		var ngot int
  3440  		var buf bytes.Buffer
  3441  		for {
  3442  			reply, err := stream.Recv()
  3443  			if err == io.EOF {
  3444  				break
  3445  			}
  3446  			if err != nil {
  3447  				t.Fatal(err)
  3448  			}
  3449  			ngot++
  3450  			if buf.Len() > 0 {
  3451  				buf.WriteByte(',')
  3452  			}
  3453  			buf.Write(reply.GetPayload().GetBody())
  3454  		}
  3455  		if want := 10; ngot != want {
  3456  			t.Errorf("Got %d replies, want %d", ngot, want)
  3457  		}
  3458  		if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  3459  			t.Errorf("Got replies %q; want %q", got, want)
  3460  		}
  3461  	}
  3462  
  3463  	var wg sync.WaitGroup
  3464  	for i := 0; i < 20; i++ {
  3465  		wg.Add(1)
  3466  		go func() {
  3467  			defer wg.Done()
  3468  			doStreamingCall()
  3469  		}()
  3470  	}
  3471  	wg.Wait()
  3472  
  3473  }
  3474  
  3475  func generatePayloadSizes() [][]int {
  3476  	reqSizes := [][]int{
  3477  		{27182, 8, 1828, 45904},
  3478  	}
  3479  
  3480  	num8KPayloads := 1024
  3481  	eightKPayloads := []int{}
  3482  	for i := 0; i < num8KPayloads; i++ {
  3483  		eightKPayloads = append(eightKPayloads, (1 << 13))
  3484  	}
  3485  	reqSizes = append(reqSizes, eightKPayloads)
  3486  
  3487  	num2MPayloads := 8
  3488  	twoMPayloads := []int{}
  3489  	for i := 0; i < num2MPayloads; i++ {
  3490  		twoMPayloads = append(twoMPayloads, (1 << 21))
  3491  	}
  3492  	reqSizes = append(reqSizes, twoMPayloads)
  3493  
  3494  	return reqSizes
  3495  }
  3496  
  3497  func (s) TestClientStreaming(t *testing.T) {
  3498  	for _, s := range generatePayloadSizes() {
  3499  		for _, e := range listTestEnv() {
  3500  			testClientStreaming(t, e, s)
  3501  		}
  3502  	}
  3503  }
  3504  
  3505  func testClientStreaming(t *testing.T, e env, sizes []int) {
  3506  	te := newTest(t, e)
  3507  	te.startServer(&testServer{security: e.security})
  3508  	defer te.tearDown()
  3509  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3510  
  3511  	ctx, cancel := context.WithTimeout(te.ctx, defaultTestTimeout)
  3512  	defer cancel()
  3513  	stream, err := tc.StreamingInputCall(ctx)
  3514  	if err != nil {
  3515  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
  3516  	}
  3517  
  3518  	var sum int
  3519  	for _, s := range sizes {
  3520  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
  3521  		if err != nil {
  3522  			t.Fatal(err)
  3523  		}
  3524  
  3525  		req := &testpb.StreamingInputCallRequest{
  3526  			Payload: payload,
  3527  		}
  3528  		if err := stream.Send(req); err != nil {
  3529  			t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
  3530  		}
  3531  		sum += s
  3532  	}
  3533  	reply, err := stream.CloseAndRecv()
  3534  	if err != nil {
  3535  		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
  3536  	}
  3537  	if reply.GetAggregatedPayloadSize() != int32(sum) {
  3538  		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
  3539  	}
  3540  }
  3541  
  3542  func (s) TestClientStreamingError(t *testing.T) {
  3543  	for _, e := range listTestEnv() {
  3544  		if e.name == "handler-tls" {
  3545  			continue
  3546  		}
  3547  		testClientStreamingError(t, e)
  3548  	}
  3549  }
  3550  
// testClientStreamingError exercises a client-streaming RPC against a server
// configured to fail after the first message (earlyFail). It keeps sending
// until Send reports io.EOF — the transport's signal that the stream has
// been terminated — then checks that CloseAndRecv surfaces the server's
// NotFound status.
func testClientStreamingError(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security, earlyFail: true})
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	stream, err := tc.StreamingInputCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.StreamingInputCallRequest{
		Payload: payload,
	}
	// The 1st request should go through.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	// Keep sending until the stream breaks. Send returns io.EOF once the
	// server has ended the RPC; the actual status is then retrieved via
	// CloseAndRecv.
	for {
		if err := stream.Send(req); err != io.EOF {
			continue
		}
		if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound {
			t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound)
		}
		break
	}
}
  3583  
  3584  func (s) TestExceedMaxStreamsLimit(t *testing.T) {
  3585  	for _, e := range listTestEnv() {
  3586  		testExceedMaxStreamsLimit(t, e)
  3587  	}
  3588  }
  3589  
  3590  func testExceedMaxStreamsLimit(t *testing.T, e env) {
  3591  	te := newTest(t, e)
  3592  	te.declareLogNoise(
  3593  		"http2Client.notifyError got notified that the client transport was broken",
  3594  		"Conn.resetTransport failed to create client transport",
  3595  		"grpc: the connection is closing",
  3596  	)
  3597  	te.maxStream = 1 // Only allows 1 live stream per server transport.
  3598  	te.startServer(&testServer{security: e.security})
  3599  	defer te.tearDown()
  3600  
  3601  	cc := te.clientConn()
  3602  	tc := testgrpc.NewTestServiceClient(cc)
  3603  
  3604  	_, err := tc.StreamingInputCall(te.ctx)
  3605  	if err != nil {
  3606  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
  3607  	}
  3608  	// Loop until receiving the new max stream setting from the server.
  3609  	for {
  3610  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
  3611  		defer cancel()
  3612  		_, err := tc.StreamingInputCall(ctx)
  3613  		if err == nil {
  3614  			time.Sleep(50 * time.Millisecond)
  3615  			continue
  3616  		}
  3617  		if status.Code(err) == codes.DeadlineExceeded {
  3618  			break
  3619  		}
  3620  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
  3621  	}
  3622  }
  3623  
  3624  func (s) TestStreamsQuotaRecovery(t *testing.T) {
  3625  	for _, e := range listTestEnv() {
  3626  		testStreamsQuotaRecovery(t, e)
  3627  	}
  3628  }
  3629  
// testStreamsQuotaRecovery verifies that the stream quota held by a live
// stream is released once that stream's context is canceled, allowing a new
// stream to be created.
func testStreamsQuotaRecovery(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise(
		"http2Client.notifyError got notified that the client transport was broken",
		"Conn.resetTransport failed to create client transport",
		"grpc: the connection is closing",
	)
	te.maxStream = 1 // Allows 1 live stream.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	// This outer ctx/cancel pair belongs to the stream that occupies the
	// single quota slot; cancel is invoked explicitly further below.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
	}
	// Loop until the new max stream setting is effective.
	for {
		// Note: this ctx/cancel shadows the outer pair only inside the loop.
		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
		_, err := tc.StreamingInputCall(ctx)
		cancel()
		if err == nil {
			time.Sleep(5 * time.Millisecond)
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314)
			if err != nil {
				t.Error(err)
				return
			}
			req := &testpb.SimpleRequest{
				ResponseType: testpb.PayloadType_COMPRESSABLE,
				ResponseSize: 1592,
				Payload:      payload,
			}
			// No rpc should go through due to the max streams limit.
			ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
			defer cancel()
			if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
				t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
			}
		}()
	}
	wg.Wait()

	// This cancels the outer context from the top of the function, ending the
	// first stream and freeing its quota slot.
	cancel()
	// A new stream should be allowed after canceling the first one.
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
	}
}
  3696  
  3697  func (s) TestUnaryClientInterceptor(t *testing.T) {
  3698  	for _, e := range listTestEnv() {
  3699  		testUnaryClientInterceptor(t, e)
  3700  	}
  3701  }
  3702  
  3703  func failOkayRPC(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
  3704  	err := invoker(ctx, method, req, reply, cc, opts...)
  3705  	if err == nil {
  3706  		return status.Error(codes.NotFound, "")
  3707  	}
  3708  	return err
  3709  }
  3710  
  3711  func testUnaryClientInterceptor(t *testing.T, e env) {
  3712  	te := newTest(t, e)
  3713  	te.userAgent = testAppUA
  3714  	te.unaryClientInt = failOkayRPC
  3715  	te.startServer(&testServer{security: e.security})
  3716  	defer te.tearDown()
  3717  
  3718  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3719  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3720  	defer cancel()
  3721  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound {
  3722  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  3723  	}
  3724  }
  3725  
  3726  func (s) TestStreamClientInterceptor(t *testing.T) {
  3727  	for _, e := range listTestEnv() {
  3728  		testStreamClientInterceptor(t, e)
  3729  	}
  3730  }
  3731  
  3732  func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
  3733  	s, err := streamer(ctx, desc, cc, method, opts...)
  3734  	if err == nil {
  3735  		return nil, status.Error(codes.NotFound, "")
  3736  	}
  3737  	return s, nil
  3738  }
  3739  
  3740  func testStreamClientInterceptor(t *testing.T, e env) {
  3741  	te := newTest(t, e)
  3742  	te.streamClientInt = failOkayStream
  3743  	te.startServer(&testServer{security: e.security})
  3744  	defer te.tearDown()
  3745  
  3746  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3747  	respParam := []*testpb.ResponseParameters{
  3748  		{
  3749  			Size: int32(1),
  3750  		},
  3751  	}
  3752  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  3753  	if err != nil {
  3754  		t.Fatal(err)
  3755  	}
  3756  	req := &testpb.StreamingOutputCallRequest{
  3757  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3758  		ResponseParameters: respParam,
  3759  		Payload:            payload,
  3760  	}
  3761  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3762  	defer cancel()
  3763  	if _, err := tc.StreamingOutputCall(ctx, req); status.Code(err) != codes.NotFound {
  3764  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  3765  	}
  3766  }
  3767  
  3768  func (s) TestUnaryServerInterceptor(t *testing.T) {
  3769  	for _, e := range listTestEnv() {
  3770  		testUnaryServerInterceptor(t, e)
  3771  	}
  3772  }
  3773  
// errInjector is a unary server interceptor that rejects every RPC with
// PermissionDenied without invoking the handler.
func errInjector(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	return nil, status.Error(codes.PermissionDenied, "")
}
  3777  
  3778  func testUnaryServerInterceptor(t *testing.T, e env) {
  3779  	te := newTest(t, e)
  3780  	te.unaryServerInt = errInjector
  3781  	te.startServer(&testServer{security: e.security})
  3782  	defer te.tearDown()
  3783  
  3784  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3785  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3786  	defer cancel()
  3787  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied {
  3788  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  3789  	}
  3790  }
  3791  
  3792  func (s) TestStreamServerInterceptor(t *testing.T) {
  3793  	for _, e := range listTestEnv() {
  3794  		// TODO(bradfitz): Temporarily skip this env due to #619.
  3795  		if e.name == "handler-tls" {
  3796  			continue
  3797  		}
  3798  		testStreamServerInterceptor(t, e)
  3799  	}
  3800  }
  3801  
  3802  func fullDuplexOnly(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  3803  	if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" {
  3804  		return handler(srv, ss)
  3805  	}
  3806  	// Reject the other methods.
  3807  	return status.Error(codes.PermissionDenied, "")
  3808  }
  3809  
  3810  func testStreamServerInterceptor(t *testing.T, e env) {
  3811  	te := newTest(t, e)
  3812  	te.streamServerInt = fullDuplexOnly
  3813  	te.startServer(&testServer{security: e.security})
  3814  	defer te.tearDown()
  3815  
  3816  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3817  	respParam := []*testpb.ResponseParameters{
  3818  		{
  3819  			Size: int32(1),
  3820  		},
  3821  	}
  3822  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  3823  	if err != nil {
  3824  		t.Fatal(err)
  3825  	}
  3826  	req := &testpb.StreamingOutputCallRequest{
  3827  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3828  		ResponseParameters: respParam,
  3829  		Payload:            payload,
  3830  	}
  3831  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3832  	defer cancel()
  3833  	s1, err := tc.StreamingOutputCall(ctx, req)
  3834  	if err != nil {
  3835  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, <nil>", tc, err)
  3836  	}
  3837  	if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied {
  3838  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  3839  	}
  3840  	s2, err := tc.FullDuplexCall(ctx)
  3841  	if err != nil {
  3842  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3843  	}
  3844  	if err := s2.Send(req); err != nil {
  3845  		t.Fatalf("%v.Send(_) = %v, want <nil>", s2, err)
  3846  	}
  3847  	if _, err := s2.Recv(); err != nil {
  3848  		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", s2, err)
  3849  	}
  3850  }
  3851  
// funcServer implements methods of TestServiceServer using funcs,
// similar to an http.HandlerFunc.
// Any unimplemented method will crash. Tests implement the method(s)
// they need.
type funcServer struct {
	testgrpc.TestServiceServer
	// Per-method hooks; invoking a method whose hook is nil panics (the
	// wrapper methods call the hook directly).
	unaryCall          func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
	streamingInputCall func(stream testgrpc.TestService_StreamingInputCallServer) error
	fullDuplexCall     func(stream testgrpc.TestService_FullDuplexCallServer) error
}
  3862  
// UnaryCall delegates to the test-provided unaryCall hook; panics if the
// test did not set one.
func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return s.unaryCall(ctx, in)
}
  3866  
// StreamingInputCall delegates to the test-provided streamingInputCall hook;
// panics if the test did not set one.
func (s *funcServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error {
	return s.streamingInputCall(stream)
}
  3870  
// FullDuplexCall delegates to the test-provided fullDuplexCall hook; panics
// if the test did not set one.
func (s *funcServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error {
	return s.fullDuplexCall(stream)
}
  3874  
  3875  func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) {
  3876  	for _, e := range listTestEnv() {
  3877  		testClientRequestBodyErrorUnexpectedEOF(t, e)
  3878  	}
  3879  }
  3880  
  3881  func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) {
  3882  	te := newTest(t, e)
  3883  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  3884  		errUnexpectedCall := errors.New("unexpected call func server method")
  3885  		t.Error(errUnexpectedCall)
  3886  		return nil, errUnexpectedCall
  3887  	}}
  3888  	te.startServer(ts)
  3889  	defer te.tearDown()
  3890  	te.withServerTester(func(st *serverTester) {
  3891  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
  3892  		// Say we have 5 bytes coming, but set END_STREAM flag:
  3893  		st.writeData(1, true, []byte{0, 0, 0, 0, 5})
  3894  		st.wantAnyFrame() // wait for server to crash (it used to crash)
  3895  	})
  3896  }
  3897  
  3898  func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) {
  3899  	for _, e := range listTestEnv() {
  3900  		testClientRequestBodyErrorCloseAfterLength(t, e)
  3901  	}
  3902  }
  3903  
  3904  func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) {
  3905  	te := newTest(t, e)
  3906  	te.declareLogNoise("Server.processUnaryRPC failed to write status")
  3907  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  3908  		errUnexpectedCall := errors.New("unexpected call func server method")
  3909  		t.Error(errUnexpectedCall)
  3910  		return nil, errUnexpectedCall
  3911  	}}
  3912  	te.startServer(ts)
  3913  	defer te.tearDown()
  3914  	te.withServerTester(func(st *serverTester) {
  3915  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
  3916  		// say we're sending 5 bytes, but then close the connection instead.
  3917  		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
  3918  		st.cc.Close()
  3919  	})
  3920  }
  3921  
  3922  func (s) TestClientRequestBodyErrorCancel(t *testing.T) {
  3923  	for _, e := range listTestEnv() {
  3924  		testClientRequestBodyErrorCancel(t, e)
  3925  	}
  3926  }
  3927  
// testClientRequestBodyErrorCancel sends RST_STREAM before the promised
// request body and verifies the handler never runs for the canceled stream,
// while a subsequent (valid-enough) stream still reaches the handler.
func testClientRequestBodyErrorCancel(t *testing.T, e env) {
	te := newTest(t, e)
	gotCall := make(chan bool, 1)
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		gotCall <- true
		return new(testpb.SimpleResponse), nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// Say we have 5 bytes coming, but cancel it instead.
		st.writeRSTStream(1, http2.ErrCodeCancel)
		st.writeData(1, false, []byte{0, 0, 0, 0, 5})

		// Verify we didn't get a call yet.
		select {
		case <-gotCall:
			t.Fatal("unexpected call")
		default:
		}

		// And now send an uncanceled (but still invalid), just to get a response.
		st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall", false)
		st.writeData(3, true, []byte{0, 0, 0, 0, 0})
		<-gotCall
		st.wantAnyFrame()
	})
}
  3957  
  3958  func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) {
  3959  	for _, e := range listTestEnv() {
  3960  		testClientRequestBodyErrorCancelStreamingInput(t, e)
  3961  	}
  3962  }
  3963  
  3964  func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) {
  3965  	te := newTest(t, e)
  3966  	recvErr := make(chan error, 1)
  3967  	ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error {
  3968  		_, err := stream.Recv()
  3969  		recvErr <- err
  3970  		return nil
  3971  	}}
  3972  	te.startServer(ts)
  3973  	defer te.tearDown()
  3974  	te.withServerTester(func(st *serverTester) {
  3975  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
  3976  		// Say we have 5 bytes coming, but cancel it instead.
  3977  		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
  3978  		st.writeRSTStream(1, http2.ErrCodeCancel)
  3979  
  3980  		var got error
  3981  		select {
  3982  		case got = <-recvErr:
  3983  		case <-time.After(3 * time.Second):
  3984  			t.Fatal("timeout waiting for error")
  3985  		}
  3986  		if grpc.Code(got) != codes.Canceled {
  3987  			t.Errorf("error = %#v; want error code %s", got, codes.Canceled)
  3988  		}
  3989  	})
  3990  }
  3991  
  3992  func (s) TestClientInitialHeaderEndStream(t *testing.T) {
  3993  	for _, e := range listTestEnv() {
  3994  		if e.httpHandler {
  3995  			continue
  3996  		}
  3997  		testClientInitialHeaderEndStream(t, e)
  3998  	}
  3999  }
  4000  
// testClientInitialHeaderEndStream sends HEADERS with END_STREAM set and
// then illegally writes DATA on the half-closed stream. The server must
// reset the stream with STREAM_CLOSED, and the handler's Recv must observe a
// Canceled error rather than data.
func testClientInitialHeaderEndStream(t *testing.T, e env) {
	// To ensure RST_STREAM is sent for illegal data write and not normal stream
	// close.
	frameCheckingDone := make(chan struct{})
	// To ensure goroutine for test does not end before RPC handler performs error
	// checking.
	handlerDone := make(chan struct{})
	te := newTest(t, e)
	ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error {
		defer close(handlerDone)
		// Block on serverTester receiving RST_STREAM. This ensures server has closed
		// stream before stream.Recv().
		<-frameCheckingDone
		data, err := stream.Recv()
		if err == nil {
			t.Errorf("unexpected data received in func server method: '%v'", data)
		} else if status.Code(err) != codes.Canceled {
			t.Errorf("expected canceled error, instead received '%v'", err)
		}
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		// Send a headers with END_STREAM flag, but then write data.
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", true)
		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
		st.wantAnyFrame()
		st.wantAnyFrame()
		st.wantRSTStream(http2.ErrCodeStreamClosed)
		// Unblock the handler only after the RST_STREAM has been observed.
		close(frameCheckingDone)
		<-handlerDone
	})
}
  4035  
  4036  func (s) TestClientSendDataAfterCloseSend(t *testing.T) {
  4037  	for _, e := range listTestEnv() {
  4038  		if e.httpHandler {
  4039  			continue
  4040  		}
  4041  		testClientSendDataAfterCloseSend(t, e)
  4042  	}
  4043  }
  4044  
// testClientSendDataAfterCloseSend writes DATA after a frame carrying
// END_STREAM. The server must reset the stream with STREAM_CLOSED, and the
// handler must see either io.EOF or Canceled from Recv — and Canceled from a
// subsequent SendMsg — never a success.
func testClientSendDataAfterCloseSend(t *testing.T, e env) {
	// To ensure RST_STREAM is sent for illegal data write prior to execution of RPC
	// handler.
	frameCheckingDone := make(chan struct{})
	// To ensure goroutine for test does not end before RPC handler performs error
	// checking.
	handlerDone := make(chan struct{})
	te := newTest(t, e)
	ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error {
		defer close(handlerDone)
		// Block on serverTester receiving RST_STREAM. This ensures server has closed
		// stream before stream.Recv().
		<-frameCheckingDone
		for {
			_, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				if status.Code(err) != codes.Canceled {
					t.Errorf("expected canceled error, instead received '%v'", err)
				}
				break
			}
		}
		// Sending on the closed stream must also fail with Canceled.
		if err := stream.SendMsg(nil); err == nil {
			t.Error("expected error sending message on stream after stream closed due to illegal data")
		} else if status.Code(err) != codes.Canceled {
			t.Errorf("expected cancel error, instead received '%v'", err)
		}
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
		// Send data with END_STREAM flag, but then write more data.
		st.writeData(1, true, []byte{0, 0, 0, 0, 0})
		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
		st.wantAnyFrame()
		st.wantAnyFrame()
		st.wantRSTStream(http2.ErrCodeStreamClosed)
		// Unblock the handler only after the RST_STREAM has been observed.
		close(frameCheckingDone)
		<-handlerDone
	})
}
  4091  
  4092  func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) {
  4093  	for _, e := range listTestEnv() {
  4094  		if e.httpHandler {
  4095  			// httpHandler write won't be blocked on flow control window.
  4096  			continue
  4097  		}
  4098  		testClientResourceExhaustedCancelFullDuplex(t, e)
  4099  	}
  4100  }
  4101  
// testClientResourceExhaustedCancelFullDuplex verifies that when the client
// fails an RPC with ResourceExhausted (response larger than
// maxClientReceiveMsgSize), the server-side handler observes the stream as
// Canceled instead of blocking forever on flow control.
func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
	te := newTest(t, e)
	// recvErr carries the final error seen by the server handler's send loop
	// back to the test goroutine.
	recvErr := make(chan error, 1)
	ts := &funcServer{fullDuplexCall: func(stream testgrpc.TestService_FullDuplexCallServer) error {
		defer close(recvErr)
		_, err := stream.Recv()
		if err != nil {
			return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want <nil>", err)
		}
		// create a payload that's larger than the default flow control window.
		// NOTE(review): the payload is only 10 bytes; the client's 10-byte
		// receive limit (set below) is what triggers ResourceExhausted, and the
		// unbounded send loop below is what eventually fills flow control —
		// confirm the original comment's intent.
		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10)
		if err != nil {
			return err
		}
		resp := &testpb.StreamingOutputCallResponse{
			Payload: payload,
		}
		ce := make(chan error, 1)
		// Send responses in a tight loop until the stream fails; report the
		// terminal error on ce.
		go func() {
			var err error
			for {
				if err = stream.Send(resp); err != nil {
					break
				}
			}
			ce <- err
		}()
		select {
		case err = <-ce:
		case <-time.After(10 * time.Second):
			err = errors.New("10s timeout reached")
		}
		recvErr <- err
		return err
	}}
	te.startServer(ts)
	defer te.tearDown()
	// set a low limit on receive message size to error with Resource Exhausted on
	// client side when server send a large message.
	te.maxClientReceiveMsgSize = newInt(10)
	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	req := &testpb.StreamingOutputCallRequest{}
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	// The response exceeds the 10-byte receive limit, so the client RPC must
	// fail with ResourceExhausted.
	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	// The server handler's send loop should then observe the cancelation.
	err = <-recvErr
	if status.Code(err) != codes.Canceled {
		t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled)
	}
}
  4163  
  4164  type clientFailCreds struct{}
  4165  
  4166  func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  4167  	return rawConn, nil, nil
  4168  }
  4169  func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  4170  	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
  4171  }
  4172  func (c *clientFailCreds) Info() credentials.ProtocolInfo {
  4173  	return credentials.ProtocolInfo{}
  4174  }
  4175  func (c *clientFailCreds) Clone() credentials.TransportCredentials {
  4176  	return c
  4177  }
  4178  func (c *clientFailCreds) OverrideServerName(s string) error {
  4179  	return nil
  4180  }
  4181  
  4182  // This test makes sure that failfast RPCs fail if client handshake fails with
  4183  // fatal errors.
  4184  func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
  4185  	lis, err := net.Listen("tcp", "localhost:0")
  4186  	if err != nil {
  4187  		t.Fatalf("Failed to listen: %v", err)
  4188  	}
  4189  	defer lis.Close()
  4190  
  4191  	cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
  4192  	if err != nil {
  4193  		t.Fatalf("grpc.Dial(_) = %v", err)
  4194  	}
  4195  	defer cc.Close()
  4196  
  4197  	tc := testgrpc.NewTestServiceClient(cc)
  4198  	// This unary call should fail, but not timeout.
  4199  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4200  	defer cancel()
  4201  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
  4202  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
  4203  	}
  4204  }
  4205  
// TestFlowControlLogicalRace issues many server-streaming RPCs, reads only a
// couple of responses from each, and cancels — stressing flow control
// accounting across successive streams on one connection.
func (s) TestFlowControlLogicalRace(t *testing.T) {
	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
	// and other flow control bugs.

	// itemCount/itemSize: what the server sends per stream; recvCount: how
	// many of those the client actually reads before canceling; maxFailures:
	// tolerated failed RPCs before aborting the run.
	const (
		itemCount   = 100
		itemSize    = 1 << 10
		recvCount   = 2
		maxFailures = 3
	)

	requestCount := 3000
	if raceMode {
		// Fewer iterations under -race to keep the runtime reasonable.
		requestCount = 1000
	}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer lis.Close()

	s := grpc.NewServer()
	testgrpc.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{
		itemCount: itemCount,
		itemSize:  itemSize,
	})
	defer s.Stop()

	go s.Serve(lis)

	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
	}
	defer cc.Close()
	cl := testgrpc.NewTestServiceClient(cc)

	failures := 0
	for i := 0; i < requestCount; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
		if err != nil {
			t.Fatalf("StreamingOutputCall; err = %q", err)
		}

		// Read only recvCount of the itemCount responses, then cancel; the
		// server's remaining sends must not corrupt connection flow control
		// for subsequent RPCs.
		for j := 0; j < recvCount; j++ {
			if _, err := output.Recv(); err != nil {
				if err == io.EOF || status.Code(err) == codes.DeadlineExceeded {
					t.Errorf("got %d responses to request %d", j, i)
					failures++
					break
				}
				t.Fatalf("Recv; err = %q", err)
			}
		}
		cancel()

		if failures >= maxFailures {
			// Continue past the first failure to see if the connection is
			// entirely broken, or if only a single RPC was affected
			t.Fatalf("Too many failures received; aborting")
		}
	}
}
  4271  
// flowControlLogicalRaceServer implements StreamingOutputCall by sending
// itemCount responses of itemSize bytes each; used by
// TestFlowControlLogicalRace.
type flowControlLogicalRaceServer struct {
	testgrpc.TestServiceServer

	itemSize  int // payload bytes per response message
	itemCount int // number of response messages per RPC
}
  4278  
  4279  func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testgrpc.TestService_StreamingOutputCallServer) error {
  4280  	for i := 0; i < s.itemCount; i++ {
  4281  		err := srv.Send(&testpb.StreamingOutputCallResponse{
  4282  			Payload: &testpb.Payload{
  4283  				// Sending a large stream of data which the client reject
  4284  				// helps to trigger some types of flow control bugs.
  4285  				//
  4286  				// Reallocating memory here is inefficient, but the stress it
  4287  				// puts on the GC leads to more frequent flow control
  4288  				// failures. The GC likely causes more variety in the
  4289  				// goroutine scheduling orders.
  4290  				Body: bytes.Repeat([]byte("a"), s.itemSize),
  4291  			},
  4292  		})
  4293  		if err != nil {
  4294  			return err
  4295  		}
  4296  	}
  4297  	return nil
  4298  }
  4299  
// lockingWriter is an io.Writer whose Write calls are serialized by a mutex
// and whose underlying writer can be swapped at runtime via setWriter.
type lockingWriter struct {
	mu sync.Mutex // guards w
	w  io.Writer
}
  4304  
// Write forwards p to the current underlying writer while holding the lock,
// so concurrent writers and setWriter calls do not race.
func (lw *lockingWriter) Write(p []byte) (n int, err error) {
	lw.mu.Lock()
	defer lw.mu.Unlock()
	return lw.w.Write(p)
}
  4310  
// setWriter replaces the underlying writer under the lock.
func (lw *lockingWriter) setWriter(w io.Writer) {
	lw.mu.Lock()
	defer lw.mu.Unlock()
	lw.w = w
}
  4316  
  4317  var testLogOutput = &lockingWriter{w: os.Stderr}
  4318  
// awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to
// terminate, if they're still running. It spams logs with this
// message.  We wait for it so our log filter is still
// active. Otherwise the "defer restore()" at the top of various test
// functions restores our log filter and then the goroutine spams.
func awaitNewConnLogOutput() {
	// 50ms is a best-effort bound; awaitLogOutput gives up silently after it.
	awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry")
}
  4327  
  4328  func awaitLogOutput(maxWait time.Duration, phrase string) {
  4329  	pb := []byte(phrase)
  4330  
  4331  	timer := time.NewTimer(maxWait)
  4332  	defer timer.Stop()
  4333  	wakeup := make(chan bool, 1)
  4334  	for {
  4335  		if logOutputHasContents(pb, wakeup) {
  4336  			return
  4337  		}
  4338  		select {
  4339  		case <-timer.C:
  4340  			// Too slow. Oh well.
  4341  			return
  4342  		case <-wakeup:
  4343  		}
  4344  	}
  4345  }
  4346  
  4347  func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
  4348  	testLogOutput.mu.Lock()
  4349  	defer testLogOutput.mu.Unlock()
  4350  	fw, ok := testLogOutput.w.(*filterWriter)
  4351  	if !ok {
  4352  		return false
  4353  	}
  4354  	fw.mu.Lock()
  4355  	defer fw.mu.Unlock()
  4356  	if bytes.Contains(fw.buf.Bytes(), v) {
  4357  		return true
  4358  	}
  4359  	fw.wakeup = wakeup
  4360  	return false
  4361  }
  4362  
  4363  var verboseLogs = flag.Bool("verbose_logs", false, "show all log output, without filtering")
  4364  
  4365  func noop() {}
  4366  
  4367  // declareLogNoise declares that t is expected to emit the following noisy
  4368  // phrases, even on success. Those phrases will be filtered from log output and
  4369  // only be shown if *verbose_logs or t ends up failing. The returned restore
  4370  // function should be called with defer to be run before the test ends.
  4371  func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
  4372  	if *verboseLogs {
  4373  		return noop
  4374  	}
  4375  	fw := &filterWriter{dst: os.Stderr, filter: phrases}
  4376  	testLogOutput.setWriter(fw)
  4377  	return func() {
  4378  		if t.Failed() {
  4379  			fw.mu.Lock()
  4380  			defer fw.mu.Unlock()
  4381  			if fw.buf.Len() > 0 {
  4382  				t.Logf("Complete log output:\n%s", fw.buf.Bytes())
  4383  			}
  4384  		}
  4385  		testLogOutput.setWriter(os.Stderr)
  4386  	}
  4387  }
  4388  
// filterWriter buffers everything written to it and forwards to dst only the
// writes that contain none of the filter phrases. Each write optionally
// signals a registered waiter (see logOutputHasContents).
type filterWriter struct {
	dst    io.Writer // where non-filtered output is forwarded
	filter []string  // phrases to suppress from dst

	mu     sync.Mutex
	buf    bytes.Buffer // complete capture of all writes, filtered or not
	wakeup chan<- bool  // if non-nil, gets true on write
}
  4397  
  4398  func (fw *filterWriter) Write(p []byte) (n int, err error) {
  4399  	fw.mu.Lock()
  4400  	fw.buf.Write(p)
  4401  	if fw.wakeup != nil {
  4402  		select {
  4403  		case fw.wakeup <- true:
  4404  		default:
  4405  		}
  4406  	}
  4407  	fw.mu.Unlock()
  4408  
  4409  	ps := string(p)
  4410  	for _, f := range fw.filter {
  4411  		if strings.Contains(ps, f) {
  4412  			return len(p), nil
  4413  		}
  4414  	}
  4415  	return fw.dst.Write(p)
  4416  }
  4417  
  4418  func (s) TestGRPCMethod(t *testing.T) {
  4419  	var method string
  4420  	var ok bool
  4421  
  4422  	ss := &stubserver.StubServer{
  4423  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  4424  			method, ok = grpc.Method(ctx)
  4425  			return &testpb.Empty{}, nil
  4426  		},
  4427  	}
  4428  	if err := ss.Start(nil); err != nil {
  4429  		t.Fatalf("Error starting endpoint server: %v", err)
  4430  	}
  4431  	defer ss.Stop()
  4432  
  4433  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4434  	defer cancel()
  4435  
  4436  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  4437  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  4438  	}
  4439  
  4440  	if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want {
  4441  		t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want)
  4442  	}
  4443  }
  4444  
// TestUnaryProxyDoesNotForwardMetadata verifies that incoming metadata is not
// implicitly forwarded when a server handler issues an outgoing unary RPC
// using its incoming context.
func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
	const mdkey = "somedata"

	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
	endpoint := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
				return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
	// without explicitly copying the metadata.
	proxy := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
				return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
			}
			// Passing ctx (the incoming context) directly: incoming metadata
			// must NOT be copied to the outgoing RPC automatically.
			return endpoint.Client.EmptyCall(ctx, in)
		},
	}
	if err := proxy.Start(nil); err != nil {
		t.Fatalf("Error starting proxy server: %v", err)
	}
	defer proxy.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	md := metadata.Pairs(mdkey, "val")
	ctx = metadata.NewOutgoingContext(ctx, md)

	// Sanity check that endpoint properly errors when it sees mdkey.
	_, err := endpoint.Client.EmptyCall(ctx, &testpb.Empty{})
	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
		t.Fatalf("endpoint.Client.EmptyCall(_, _) = _, %v; want _, <status with Code()=Internal>", err)
	}

	// Full path: client -> proxy (sees mdkey) -> endpoint (must not see it).
	if _, err := proxy.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatal(err.Error())
	}
}
  4492  
// TestStreamingProxyDoesNotForwardMetadata is the streaming analogue of
// TestUnaryProxyDoesNotForwardMetadata: incoming metadata must not be
// implicitly forwarded when a handler opens an outgoing stream with its
// incoming context.
func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
	const mdkey = "somedata"

	// doFDC performs a FullDuplexCall with client and returns the error from the
	// first stream.Recv call, or nil if that error is io.EOF.  Calls t.Fatal if
	// the stream cannot be established.
	doFDC := func(ctx context.Context, client testgrpc.TestServiceClient) error {
		stream, err := client.FullDuplexCall(ctx)
		if err != nil {
			t.Fatalf("Unwanted error: %v", err)
		}
		if _, err := stream.Recv(); err != io.EOF {
			return err
		}
		return nil
	}

	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
	endpoint := &stubserver.StubServer{
		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
			ctx := stream.Context()
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
			}
			return nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
	// without explicitly copying the metadata.
	proxy := &stubserver.StubServer{
		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
			ctx := stream.Context()
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
			}
			// Opening the outgoing stream with the incoming ctx: metadata
			// must NOT be forwarded automatically.
			return doFDC(ctx, endpoint.Client)
		},
	}
	if err := proxy.Start(nil); err != nil {
		t.Fatalf("Error starting proxy server: %v", err)
	}
	defer proxy.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	md := metadata.Pairs(mdkey, "val")
	ctx = metadata.NewOutgoingContext(ctx, md)

	// Sanity check that endpoint properly errors when it sees mdkey in ctx.
	err := doFDC(ctx, endpoint.Client)
	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v; want _, <status with Code()=Internal>", err)
	}

	// Full path: client -> proxy (sees mdkey) -> endpoint (must not see it).
	if err := doFDC(ctx, proxy.Client); err != nil {
		t.Fatalf("doFDC(_, proxy.Client) = %v; want nil", err)
	}
}
  4556  
// TestStatsTagsAndTrace verifies that tags/trace bytes attached to the client
// context via the stats package arrive as grpc-tags-bin/grpc-trace-bin
// metadata and are surfaced by stats.Tags/stats.Trace on the server.
func (s) TestStatsTagsAndTrace(t *testing.T) {
	// Data added to context by client (typically in a stats handler).
	tags := []byte{1, 5, 2, 4, 3}
	trace := []byte{5, 2, 1, 3, 4}

	// endpoint ensures Tags() and Trace() in context match those that were added
	// by the client and returns an error if not.
	endpoint := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			md, _ := metadata.FromIncomingContext(ctx)
			if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) {
				return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags)
			}
			if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) {
				return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags)
			}
			if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) {
				return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace)
			}
			if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) {
				return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// Only the last case carries both the correct tags and the correct trace
	// bytes; every other combination must be rejected by the endpoint.
	testCases := []struct {
		ctx  context.Context
		want codes.Code
	}{
		{ctx: ctx, want: codes.Internal},                      // neither set
		{ctx: stats.SetTags(ctx, tags), want: codes.Internal}, // trace missing
		{ctx: stats.SetTrace(ctx, trace), want: codes.Internal},                     // tags missing
		{ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal}, // trace set to the wrong bytes
		{ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK},      // both correct
	}

	for _, tc := range testCases {
		_, err := endpoint.Client.EmptyCall(tc.ctx, &testpb.Empty{})
		if tc.want == codes.OK && err != nil {
			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err)
		}
		if s, ok := status.FromError(err); !ok || s.Code() != tc.want {
			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, <status with Code()=%v>", tc.ctx, err, tc.want)
		}
	}
}
  4611  
  4612  func (s) TestTapTimeout(t *testing.T) {
  4613  	sopts := []grpc.ServerOption{
  4614  		grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) {
  4615  			c, cancel := context.WithCancel(ctx)
  4616  			// Call cancel instead of setting a deadline so we can detect which error
  4617  			// occurred -- this cancellation (desired) or the client's deadline
  4618  			// expired (indicating this cancellation did not affect the RPC).
  4619  			time.AfterFunc(10*time.Millisecond, cancel)
  4620  			return c, nil
  4621  		}),
  4622  	}
  4623  
  4624  	ss := &stubserver.StubServer{
  4625  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  4626  			<-ctx.Done()
  4627  			return nil, status.Errorf(codes.Canceled, ctx.Err().Error())
  4628  		},
  4629  	}
  4630  	if err := ss.Start(sopts); err != nil {
  4631  		t.Fatalf("Error starting endpoint server: %v", err)
  4632  	}
  4633  	defer ss.Stop()
  4634  
  4635  	// This was known to be flaky; test several times.
  4636  	for i := 0; i < 10; i++ {
  4637  		// Set our own deadline in case the server hangs.
  4638  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4639  		res, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
  4640  		cancel()
  4641  		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
  4642  			t.Fatalf("ss.Client.EmptyCall(ctx, _) = %v, %v; want nil, <status with Code()=Canceled>", res, err)
  4643  		}
  4644  	}
  4645  
  4646  }
  4647  
  4648  func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
  4649  	ss := &stubserver.StubServer{
  4650  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  4651  			return status.Errorf(codes.Internal, "")
  4652  		},
  4653  	}
  4654  	sopts := []grpc.ServerOption{}
  4655  	if err := ss.Start(sopts); err != nil {
  4656  		t.Fatalf("Error starting endpoint server: %v", err)
  4657  	}
  4658  	defer ss.Stop()
  4659  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4660  	defer cancel()
  4661  	stream, err := ss.Client.FullDuplexCall(ctx)
  4662  	if err != nil {
  4663  		t.Fatalf("Error while creating stream: %v", err)
  4664  	}
  4665  	for {
  4666  		if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil {
  4667  			time.Sleep(5 * time.Millisecond)
  4668  		} else if err == io.EOF {
  4669  			break // Success.
  4670  		} else {
  4671  			t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
  4672  		}
  4673  	}
  4674  }
  4675  
// windowSizeConfig bundles the four HTTP/2 initial flow control window sizes
// (per-stream and per-connection, for server and client) used by the
// configurable-window-size tests.
type windowSizeConfig struct {
	serverStream int32
	serverConn   int32
	clientStream int32
	clientConn   int32
}
  4682  
  4683  func max(a, b int32) int32 {
  4684  	if a > b {
  4685  		return a
  4686  	}
  4687  	return b
  4688  }
  4689  
  4690  func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) {
  4691  	wc := windowSizeConfig{
  4692  		serverStream: 8 * 1024 * 1024,
  4693  		serverConn:   12 * 1024 * 1024,
  4694  		clientStream: 6 * 1024 * 1024,
  4695  		clientConn:   8 * 1024 * 1024,
  4696  	}
  4697  	for _, e := range listTestEnv() {
  4698  		testConfigurableWindowSize(t, e, wc)
  4699  	}
  4700  }
  4701  
  4702  func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) {
  4703  	wc := windowSizeConfig{
  4704  		serverStream: 1,
  4705  		serverConn:   1,
  4706  		clientStream: 1,
  4707  		clientConn:   1,
  4708  	}
  4709  	for _, e := range listTestEnv() {
  4710  		testConfigurableWindowSize(t, e, wc)
  4711  	}
  4712  }
  4713  
// testConfigurableWindowSize exercises a full-duplex RPC with the given
// initial stream/connection window sizes configured on both client and
// server, sending enough data to exhaust the largest configured window.
func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) {
	te := newTest(t, e)
	te.serverInitialWindowSize = wc.serverStream
	te.serverInitialConnWindowSize = wc.serverConn
	te.clientInitialWindowSize = wc.clientStream
	te.clientInitialConnWindowSize = wc.clientConn

	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	numOfIter := 11
	// Set message size to exhaust largest of window sizes.
	messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1)
	// Never send less than 64KiB per message, so total traffic stays
	// meaningful even for tiny window configurations.
	messageSize = max(messageSize, 64*1024)
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize)
	if err != nil {
		t.Fatal(err)
	}
	respParams := []*testpb.ResponseParameters{
		{
			Size: messageSize,
		},
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParams,
		Payload:            payload,
	}
	// Each iteration sends a request and reads back an equally sized
	// response, forcing window updates in both directions.
	for i := 0; i < numOfIter; i++ {
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		if _, err := stream.Recv(); err != nil {
			t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
		}
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
	}
}
  4762  
  4763  func (s) TestWaitForReadyConnection(t *testing.T) {
  4764  	for _, e := range listTestEnv() {
  4765  		testWaitForReadyConnection(t, e)
  4766  	}
  4767  
  4768  }
  4769  
  4770  func testWaitForReadyConnection(t *testing.T, e env) {
  4771  	te := newTest(t, e)
  4772  	te.userAgent = testAppUA
  4773  	te.startServer(&testServer{security: e.security})
  4774  	defer te.tearDown()
  4775  
  4776  	cc := te.clientConn() // Non-blocking dial.
  4777  	tc := testgrpc.NewTestServiceClient(cc)
  4778  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4779  	defer cancel()
  4780  	testutils.AwaitState(ctx, t, cc, connectivity.Ready)
  4781  	// Make a fail-fast RPC.
  4782  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  4783  		t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err)
  4784  	}
  4785  }
  4786  
  4787  func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) {
  4788  	for _, e := range listTestEnv() {
  4789  		testSvrWriteStatusEarlyWrite(t, e)
  4790  	}
  4791  }
  4792  
// testSvrWriteStatusEarlyWrite verifies that the server's message-size limits
// are enforced in both directions on a full-duplex stream: a too-large
// request fails server receive, and a too-large response fails server send —
// both surfacing as ResourceExhausted on the client.
func testSvrWriteStatusEarlyWrite(t *testing.T, e env) {
	te := newTest(t, e)
	const smallSize = 1024
	const largeSize = 2048
	const extraLargeSize = 4096
	// Server accepts and sends at most largeSize bytes per message.
	te.maxServerReceiveMsgSize = newInt(largeSize)
	te.maxServerSendMsgSize = newInt(largeSize)
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
	if err != nil {
		t.Fatal(err)
	}
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(smallSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            extraLargePayload,
	}
	// Test recv case: server receives a message larger than maxServerReceiveMsgSize.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send() = _, %v, want <nil>", stream, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	// Test send case: server sends a message larger than maxServerSendMsgSize.
	sreq.Payload = smallPayload
	respParam[0].Size = int32(extraLargeSize)

	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  4847  
  4848  // TestMalformedStreamMethod starts a test server and sends an RPC with a
  4849  // malformed method name. The server should respond with an UNIMPLEMENTED status
  4850  // code in this case.
  4851  func (s) TestMalformedStreamMethod(t *testing.T) {
  4852  	const testMethod = "a-method-name-without-any-slashes"
  4853  	te := newTest(t, tcpClearRREnv)
  4854  	te.startServer(nil)
  4855  	defer te.tearDown()
  4856  
  4857  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4858  	defer cancel()
  4859  	err := te.clientConn().Invoke(ctx, testMethod, nil, nil)
  4860  	if gotCode := status.Code(err); gotCode != codes.Unimplemented {
  4861  		t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented)
  4862  	}
  4863  }
  4864  
  4865  func (s) TestMethodFromServerStream(t *testing.T) {
  4866  	const testMethod = "/package.service/method"
  4867  	e := tcpClearRREnv
  4868  	te := newTest(t, e)
  4869  	var method string
  4870  	var ok bool
  4871  	te.unknownHandler = func(srv any, stream grpc.ServerStream) error {
  4872  		method, ok = grpc.MethodFromServerStream(stream)
  4873  		return nil
  4874  	}
  4875  
  4876  	te.startServer(nil)
  4877  	defer te.tearDown()
  4878  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4879  	defer cancel()
  4880  	_ = te.clientConn().Invoke(ctx, testMethod, nil, nil)
  4881  	if !ok || method != testMethod {
  4882  		t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod)
  4883  	}
  4884  }
  4885  
// TestInterceptorCanAccessCallOptions verifies that unary and stream client
// interceptors are handed the complete set of grpc.CallOptions for an RPC:
// the connection's default call options followed by the per-call options.
func (s) TestInterceptorCanAccessCallOptions(t *testing.T) {
	e := tcpClearRREnv
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	// observedOptions records the payload of every recognized CallOption
	// type, in the order the options were seen by the interceptor.
	type observedOptions struct {
		headers     []*metadata.MD
		trailers    []*metadata.MD
		peer        []*peer.Peer
		creds       []credentials.PerRPCCredentials
		failFast    []bool
		maxRecvSize []int
		maxSendSize []int
		compressor  []string
		subtype     []string
	}
	var observedOpts observedOptions
	// populateOpts classifies each CallOption by concrete type and appends
	// its carried value to the matching observedOpts field.
	populateOpts := func(opts []grpc.CallOption) {
		for _, o := range opts {
			switch o := o.(type) {
			case grpc.HeaderCallOption:
				observedOpts.headers = append(observedOpts.headers, o.HeaderAddr)
			case grpc.TrailerCallOption:
				observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr)
			case grpc.PeerCallOption:
				observedOpts.peer = append(observedOpts.peer, o.PeerAddr)
			case grpc.PerRPCCredsCallOption:
				observedOpts.creds = append(observedOpts.creds, o.Creds)
			case grpc.FailFastCallOption:
				observedOpts.failFast = append(observedOpts.failFast, o.FailFast)
			case grpc.MaxRecvMsgSizeCallOption:
				observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize)
			case grpc.MaxSendMsgSizeCallOption:
				observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize)
			case grpc.CompressorCallOption:
				observedOpts.compressor = append(observedOpts.compressor, o.CompressorType)
			case grpc.ContentSubtypeCallOption:
				observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype)
			}
		}
	}

	// Both interceptors only record the options and return without invoking
	// the underlying call, so no RPC actually reaches the server.
	te.unaryClientInt = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		populateOpts(opts)
		return nil
	}
	te.streamClientInt = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		populateOpts(opts)
		return nil, nil
	}

	// Default call options attached to the connection; these appear before
	// the per-call options in what the interceptor observes.
	defaults := []grpc.CallOption{
		grpc.WaitForReady(true),
		grpc.MaxCallRecvMsgSize(1010),
	}
	tc := testgrpc.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...)))

	var headers metadata.MD
	var trailers metadata.MD
	var pr peer.Peer
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	tc.UnaryCall(ctx, &testpb.SimpleRequest{},
		grpc.MaxCallRecvMsgSize(100),
		grpc.MaxCallSendMsgSize(200),
		grpc.PerRPCCredentials(testPerRPCCredentials{}),
		grpc.Header(&headers),
		grpc.Trailer(&trailers),
		grpc.Peer(&pr))
	// WaitForReady(true) surfaces as FailFast=false; maxRecvSize lists the
	// connection default (1010) before the per-call override (100).
	expected := observedOptions{
		failFast:    []bool{false},
		maxRecvSize: []int{1010, 100},
		maxSendSize: []int{200},
		creds:       []credentials.PerRPCCredentials{testPerRPCCredentials{}},
		headers:     []*metadata.MD{&headers},
		trailers:    []*metadata.MD{&trailers},
		peer:        []*peer.Peer{&pr},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}

	observedOpts = observedOptions{} // reset

	tc.StreamingInputCall(ctx,
		grpc.WaitForReady(false),
		grpc.MaxCallSendMsgSize(2020),
		grpc.UseCompressor("comp-type"),
		grpc.CallContentSubtype("json"))
	// failFast records both the default WaitForReady(true) (-> false) and
	// the per-call WaitForReady(false) (-> true), in that order.
	expected = observedOptions{
		failFast:    []bool{false, true},
		maxRecvSize: []int{1010},
		maxSendSize: []int{2020},
		compressor:  []string{"comp-type"},
		subtype:     []string{"json"},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}
}
  4989  
  4990  func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
  4991  	ss := &stubserver.StubServer{
  4992  		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
  4993  			return &testpb.Empty{}, nil
  4994  		},
  4995  	}
  4996  
  4997  	s := grpc.NewServer()
  4998  	defer s.Stop()
  4999  	testgrpc.RegisterTestServiceServer(s, ss)
  5000  
  5001  	lis, err := net.Listen("tcp", "localhost:0")
  5002  	if err != nil {
  5003  		t.Fatalf("Failed to create listener: %v", err)
  5004  	}
  5005  
  5006  	done := make(chan struct{})
  5007  	go func() {
  5008  		s.Serve(lis)
  5009  		close(done)
  5010  	}()
  5011  
  5012  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
  5013  	if err != nil {
  5014  		t.Fatalf("Failed to dial server: %v", err)
  5015  	}
  5016  	defer cc.Close()
  5017  	c := testgrpc.NewTestServiceClient(cc)
  5018  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5019  	defer cancel()
  5020  	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5021  		t.Fatalf("Failed to send test RPC to server: %v", err)
  5022  	}
  5023  
  5024  	if err := lis.Close(); err != nil {
  5025  		t.Fatalf("Failed to close listener: %v", err)
  5026  	}
  5027  	const timeout = 5 * time.Second
  5028  	timer := time.NewTimer(timeout)
  5029  	select {
  5030  	case <-done:
  5031  		return
  5032  	case <-timer.C:
  5033  		t.Fatalf("Serve did not return after %v", timeout)
  5034  	}
  5035  }
  5036  
  5037  // Service handler returns status with invalid utf8 message.
  5038  func (s) TestStatusInvalidUTF8Message(t *testing.T) {
  5039  	var (
  5040  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  5041  		wantMsg = "���"
  5042  	)
  5043  
  5044  	ss := &stubserver.StubServer{
  5045  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5046  			return nil, status.Errorf(codes.Internal, origMsg)
  5047  		},
  5048  	}
  5049  	if err := ss.Start(nil); err != nil {
  5050  		t.Fatalf("Error starting endpoint server: %v", err)
  5051  	}
  5052  	defer ss.Stop()
  5053  
  5054  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5055  	defer cancel()
  5056  
  5057  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
  5058  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg)
  5059  	}
  5060  }
  5061  
// Service handler returns status with details and invalid utf8 message. Proto
// will fail to marshal the status because of the invalid utf8 message. Details
// will be dropped when sending.
func (s) TestStatusInvalidUTF8Details(t *testing.T) {
	// The server is expected to log this marshaling failure.
	grpctest.TLogger.ExpectError("Failed to marshal rpc status")

	var (
		origMsg = string([]byte{0xff, 0xfe, 0xfd}) // invalid UTF-8
		wantMsg = "���"                            // what the client should observe
	)

	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			// Attach a details message so the test can verify details are
			// dropped when the status fails to marshal.
			st := status.New(codes.Internal, origMsg)
			st, err := st.WithDetails(&testpb.Empty{})
			if err != nil {
				return nil, err
			}
			return nil, st.Err()
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
	st := status.Convert(err)
	if st.Message() != wantMsg {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg)
	}
	if len(st.Details()) != 0 {
		// Details should be dropped on the server side.
		t.Fatalf("RPC status contain details: %v, want no details", st.Details())
	}
}
  5101  
  5102  func (s) TestRPCTimeout(t *testing.T) {
  5103  	for _, e := range listTestEnv() {
  5104  		testRPCTimeout(t, e)
  5105  	}
  5106  }
  5107  
  5108  func testRPCTimeout(t *testing.T, e env) {
  5109  	te := newTest(t, e)
  5110  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond})
  5111  	defer te.tearDown()
  5112  
  5113  	cc := te.clientConn()
  5114  	tc := testgrpc.NewTestServiceClient(cc)
  5115  
  5116  	const argSize = 2718
  5117  	const respSize = 314
  5118  
  5119  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  5120  	if err != nil {
  5121  		t.Fatal(err)
  5122  	}
  5123  
  5124  	req := &testpb.SimpleRequest{
  5125  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  5126  		ResponseSize: respSize,
  5127  		Payload:      payload,
  5128  	}
  5129  	for i := -1; i <= 10; i++ {
  5130  		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
  5131  		if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.DeadlineExceeded {
  5132  			t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want <nil>, error code: %s", err, codes.DeadlineExceeded)
  5133  		}
  5134  		cancel()
  5135  	}
  5136  }
  5137  
// TestDisabledIOBuffers verifies that streaming RPCs work end to end when
// both the client and server transports are configured with zero-sized read
// and write buffers.
func (s) TestDisabledIOBuffers(t *testing.T) {
	// 60000-byte payload: large enough to span many unbuffered writes.
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
	if err != nil {
		t.Fatalf("Failed to create payload: %v", err)
	}
	req := &testpb.StreamingOutputCallRequest{
		Payload: payload,
	}
	resp := &testpb.StreamingOutputCallResponse{
		Payload: payload,
	}

	// Echo-style handler: validate each received payload, then send resp back.
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
			for {
				in, err := stream.Recv()
				if err == io.EOF {
					return nil
				}
				if err != nil {
					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
					return err
				}
				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
					return err
				}
				if err := stream.Send(resp); err != nil {
					t.Errorf("stream.Send(_)= %v, want <nil>", err)
					return err
				}

			}
		},
	}

	// Disable transport buffering on the server side.
	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
	testgrpc.RegisterTestServiceServer(s, ss)

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to create listener: %v", err)
	}

	go func() {
		s.Serve(lis)
	}()
	defer s.Stop()
	dctx, dcancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer dcancel()
	// Disable transport buffering on the client side as well.
	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
	if err != nil {
		t.Fatalf("Failed to dial server")
	}
	defer cc.Close()
	c := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("Failed to send test RPC to server")
	}
	// Ten round trips: send the request, expect the echoed payload back.
	for i := 0; i < 10; i++ {
		if err := stream.Send(req); err != nil {
			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
		}
		in, err := stream.Recv()
		if err != nil {
			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
		}
		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
		}
	}
	stream.CloseSend()
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
	}
}
  5217  
  5218  func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
  5219  	for _, e := range listTestEnv() {
  5220  		if e.httpHandler {
  5221  			continue
  5222  		}
  5223  		testServerMaxHeaderListSizeClientUserViolation(t, e)
  5224  	}
  5225  }
  5226  
  5227  func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
  5228  	te := newTest(t, e)
  5229  	te.maxServerHeaderListSize = new(uint32)
  5230  	*te.maxServerHeaderListSize = 216
  5231  	te.startServer(&testServer{security: e.security})
  5232  	defer te.tearDown()
  5233  
  5234  	cc := te.clientConn()
  5235  	tc := testgrpc.NewTestServiceClient(cc)
  5236  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5237  	defer cancel()
  5238  	metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
  5239  	var err error
  5240  	if err = verifyResultWithDelay(func() (bool, error) {
  5241  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  5242  			return true, nil
  5243  		}
  5244  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  5245  	}); err != nil {
  5246  		t.Fatal(err)
  5247  	}
  5248  }
  5249  
  5250  func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
  5251  	for _, e := range listTestEnv() {
  5252  		if e.httpHandler {
  5253  			continue
  5254  		}
  5255  		testClientMaxHeaderListSizeServerUserViolation(t, e)
  5256  	}
  5257  }
  5258  
  5259  func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
  5260  	te := newTest(t, e)
  5261  	te.maxClientHeaderListSize = new(uint32)
  5262  	*te.maxClientHeaderListSize = 1 // any header server sends will violate
  5263  	te.startServer(&testServer{security: e.security})
  5264  	defer te.tearDown()
  5265  
  5266  	cc := te.clientConn()
  5267  	tc := testgrpc.NewTestServiceClient(cc)
  5268  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5269  	defer cancel()
  5270  	var err error
  5271  	if err = verifyResultWithDelay(func() (bool, error) {
  5272  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  5273  			return true, nil
  5274  		}
  5275  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  5276  	}); err != nil {
  5277  		t.Fatal(err)
  5278  	}
  5279  }
  5280  
  5281  func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
  5282  	for _, e := range listTestEnv() {
  5283  		if e.httpHandler || e.security == "tls" {
  5284  			continue
  5285  		}
  5286  		testServerMaxHeaderListSizeClientIntentionalViolation(t, e)
  5287  	}
  5288  }
  5289  
// testServerMaxHeaderListSizeClientIntentionalViolation bypasses the gRPC
// client's own header-size enforcement by writing a raw HEADERS frame that
// exceeds the server's 512-byte limit, and verifies the server fails the
// stream with codes.Internal.
func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxServerHeaderListSize = new(uint32)
	*te.maxServerHeaderListSize = 512
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	// Use a conn-controlled client so raw HTTP/2 frames can be written on
	// the same connection the gRPC client is using.
	cc, dw := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	rcw := dw.getRawConnWrapper()
	// 512 one-byte strings joined -> a 512-byte header value, over the limit.
	val := make([]string, 512)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header
	time.Sleep(100 * time.Millisecond)
	rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
  5322  
  5323  func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
  5324  	for _, e := range listTestEnv() {
  5325  		if e.httpHandler || e.security == "tls" {
  5326  			continue
  5327  		}
  5328  		testClientMaxHeaderListSizeServerIntentionalViolation(t, e)
  5329  	}
  5330  }
  5331  
// testClientMaxHeaderListSizeServerIntentionalViolation bypasses the gRPC
// server's own header-size enforcement by writing a raw HEADERS frame from
// the server side that exceeds the client's 200-byte limit, and verifies the
// client fails the stream with codes.Internal.
func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxClientHeaderListSize = new(uint32)
	*te.maxClientHeaderListSize = 200
	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
	defer te.tearDown()
	cc, _ := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Poll (up to 100 * 10ms = 1s) for the server-side connection wrapper to
	// become available.
	var i int
	var rcw *rawConnWrapper
	for i = 0; i < 100; i++ {
		rcw = lw.getLastConn()
		if rcw != nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
		continue
	}
	if i == 100 {
		t.Fatalf("failed to create server transport after 1s")
	}

	// 200 one-byte strings joined -> a 200-byte header value, over the limit.
	val := make([]string, 200)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header.
	time.Sleep(100 * time.Millisecond)
	rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
  5376  
// TestNetPipeConn runs a unary RPC over an in-memory pipe listener, where
// reads and writes are fully synchronous (no kernel buffering).
func (s) TestNetPipeConn(t *testing.T) {
	// This test will block indefinitely if grpc writes both client and server
	// prefaces without either reading from the Conn.
	pl := testutils.NewPipeListener()
	s := grpc.NewServer()
	defer s.Stop()
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		return &testpb.SimpleResponse{}, nil
	}}
	testgrpc.RegisterTestServiceServer(s, ts)
	go s.Serve(pl)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Dial through the pipe rather than a real network address.
	cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDialer(pl.Dialer()))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer cc.Close()
	client := testgrpc.NewTestServiceClient(cc)
	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
	}
}
  5400  
  5401  func (s) TestLargeTimeout(t *testing.T) {
  5402  	for _, e := range listTestEnv() {
  5403  		testLargeTimeout(t, e)
  5404  	}
  5405  }
  5406  
// testLargeTimeout issues unary RPCs with very large context timeouts and
// verifies the deadline observed by the server handler is within 5s of the
// timeout requested by the client, including the math.MaxInt64 case.
func testLargeTimeout(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("Server.processUnaryRPC failed to write status")

	ts := &funcServer{}
	te.startServer(ts)
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	timeouts := []time.Duration{
		time.Duration(math.MaxInt64), // will be (correctly) converted to
		// 2562048 hours, which overflows upon converting back to an int64
		2562047 * time.Hour, // the largest timeout that does not overflow
	}

	for i, maxTimeout := range timeouts {
		// The handler asserts the round-tripped deadline lies in
		// [maxTimeout-5s, maxTimeout].
		ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
			deadline, ok := ctx.Deadline()
			timeout := time.Until(deadline)
			minTimeout := maxTimeout - 5*time.Second
			if !ok || timeout < minTimeout || timeout > maxTimeout {
				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
				return nil, status.Error(codes.OutOfRange, "deadline error")
			}
			return &testpb.SimpleResponse{}, nil
		}

		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
		// NOTE(review): defer in a loop only fires at function exit; with
		// just two iterations this is harmless here.
		defer cancel()

		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
		}
	}
}
  5442  
  5443  func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
  5444  	lis, err := net.Listen(network, address)
  5445  	if err != nil {
  5446  		return nil, err
  5447  	}
  5448  	return notifyingListener{connEstablished: event, Listener: lis}, nil
  5449  }
  5450  
// notifyingListener wraps a net.Listener and fires connEstablished each time
// Accept is invoked (see Accept below).
type notifyingListener struct {
	connEstablished *grpcsync.Event
	net.Listener
}
  5455  
  5456  func (lis notifyingListener) Accept() (net.Conn, error) {
  5457  	defer lis.connEstablished.Fire()
  5458  	return lis.Listener.Accept()
  5459  }
  5460  
// TestRPCWaitsForResolver verifies that an RPC blocks until the resolver
// produces addresses and a service config, and that the RPC then honors that
// config (here, maxRequestMessageBytes: 0).
func (s) TestRPCWaitsForResolver(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	te.startServer(&testServer{security: tcpClearRREnv.security})
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	tc := testgrpc.NewTestServiceClient(cc)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer cancel()
	// With no resolved addresses yet, this will timeout.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Push addresses and a service config after a delay, while the RPC below
	// is already pending on the resolver.
	go func() {
		time.Sleep(time.Second)
		r.UpdateState(resolver.State{
			Addresses: []resolver.Address{{Addr: te.srvAddr}},
			ServiceConfig: parseServiceConfig(t, r, `{
		    "methodConfig": [
		        {
		            "name": [
		                {
		                    "service": "grpc.testing.TestService",
		                    "method": "UnaryCall"
		                }
		            ],
                    "maxRequestMessageBytes": 0
		        }
		    ]
		}`)})
	}()
	// We wait a second before providing a service config and resolving
	// addresses.  So this will wait for that and then honor the
	// maxRequestMessageBytes it contains.
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
	if err != nil {
		t.Fatal(err)
	}
	// A 1-byte payload exceeds the configured maxRequestMessageBytes of 0.
	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{Payload: payload}); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
	}
	if got := ctx.Err(); got != nil {
		t.Fatalf("ctx.Err() = %v; want nil (deadline should be set short by service config)", got)
	}
	// An empty request is within the limit and should succeed.
	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
	}
}
  5515  
// httpServerResponse describes one scripted response: header blocks, an
// optional DATA payload, and trailer blocks. Each header/trailer block is a
// flat key/value list (see httpServer.writeHeader).
type httpServerResponse struct {
	headers  [][]string
	payload  []byte
	trailers [][]string
}
  5521  
// httpServer is a minimal scripted HTTP/2 server used to exercise the gRPC
// client against hand-crafted frame sequences (see start).
type httpServer struct {
	// If waitForEndStream is set, wait for the client to send a frame with end
	// stream in it before sending a response/refused stream.
	waitForEndStream bool
	// refuseStream, if non-nil, is consulted with each ready stream ID;
	// returning true resets that stream with REFUSED_STREAM.
	refuseStream     func(uint32) bool
	// responses are replayed one per request stream, cycling when exhausted.
	responses        []httpServerResponse
}
  5529  
  5530  func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error {
  5531  	if len(headerFields)%2 == 1 {
  5532  		panic("odd number of kv args")
  5533  	}
  5534  
  5535  	var buf bytes.Buffer
  5536  	henc := hpack.NewEncoder(&buf)
  5537  	for len(headerFields) > 0 {
  5538  		k, v := headerFields[0], headerFields[1]
  5539  		headerFields = headerFields[2:]
  5540  		henc.WriteField(hpack.HeaderField{Name: k, Value: v})
  5541  	}
  5542  
  5543  	return framer.WriteHeaders(http2.HeadersFrameParam{
  5544  		StreamID:      sid,
  5545  		BlockFragment: buf.Bytes(),
  5546  		EndStream:     endStream,
  5547  		EndHeaders:    true,
  5548  	})
  5549  }
  5550  
// writePayload writes payload to stream sid as a single DATA frame without
// the END_STREAM flag.
func (s *httpServer) writePayload(framer *http2.Framer, sid uint32, payload []byte) error {
	return framer.WriteData(sid, false, payload)
}
  5554  
// start launches a goroutine speaking just enough raw HTTP/2 to act as a
// scripted server: it accepts one connection, performs the handshake, then
// answers each request stream with the next entry of s.responses (cycling),
// optionally refusing streams via s.refuseStream.
func (s *httpServer) start(t *testing.T, lis net.Listener) {
	// Launch an HTTP server to send back header.
	go func() {
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error accepting connection: %v", err)
			return
		}
		defer conn.Close()
		// Read preface sent by client.
		if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil {
			t.Errorf("Error at server-side while reading preface from client. Err: %v", err)
			return
		}
		reader := bufio.NewReader(conn)
		writer := bufio.NewWriter(conn)
		framer := http2.NewFramer(writer, reader)
		if err = framer.WriteSettingsAck(); err != nil {
			t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)
			return
		}
		writer.Flush() // necessary since client is expecting preface before declaring connection fully setup.
		var sid uint32
		// Loop until framer returns possible conn closed errors.
		for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) {
			// Read frames until a header is received.
			for {
				frame, err := framer.ReadFrame()
				if err != nil {
					if !isConnClosedErr(err) {
						t.Errorf("Error at server-side while reading frame. got: %q, want: rpc error containing substring %q OR %q", err, possibleConnResetMsg, possibleEOFMsg)
					}
					return
				}
				// sid == 0 means "no stream is ready to be responded to yet".
				sid = 0
				switch fr := frame.(type) {
				case *http2.HeadersFrame:
					// Respond after this if we are not waiting for an end
					// stream or if this frame ends it.
					if !s.waitForEndStream || fr.StreamEnded() {
						sid = fr.Header().StreamID
					}

				case *http2.DataFrame:
					// Respond after this if we were waiting for an end stream
					// and this frame ends it.  (If we were not waiting for an
					// end stream, this stream was already responded to when
					// the headers were received.)
					if s.waitForEndStream && fr.StreamEnded() {
						sid = fr.Header().StreamID
					}
				}
				if sid != 0 {
					// A stream is ready: either respond to it (break out of
					// the read loop) or refuse it and keep reading.
					if s.refuseStream == nil || !s.refuseStream(sid) {
						break
					}
					framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream)
					writer.Flush()
				}
			}

			// Write the scripted response: header blocks, optional payload,
			// then trailer blocks (END_STREAM set on the last trailer).
			response := s.responses[requestNum]
			for _, header := range response.headers {
				if err = s.writeHeader(framer, sid, header, false); err != nil {
					t.Errorf("Error at server-side while writing headers. Err: %v", err)
					return
				}
				writer.Flush()
			}
			if response.payload != nil {
				if err = s.writePayload(framer, sid, response.payload); err != nil {
					t.Errorf("Error at server-side while writing payload. Err: %v", err)
					return
				}
				writer.Flush()
			}
			for i, trailer := range response.trailers {
				if err = s.writeHeader(framer, sid, trailer, i == len(response.trailers)-1); err != nil {
					t.Errorf("Error at server-side while writing trailers. Err: %v", err)
					return
				}
				writer.Flush()
			}
		}
	}()
}
  5641  
  5642  func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
  5643  	wg := &sync.WaitGroup{}
  5644  	called, done := make(chan struct{}), make(chan struct{})
  5645  	ss := &stubserver.StubServer{
  5646  		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
  5647  			close(called)
  5648  			<-ctx.Done()
  5649  			err := ctx.Err()
  5650  			if err != context.Canceled {
  5651  				t.Errorf("ctx.Err() = %v; want context.Canceled", err)
  5652  			}
  5653  			close(done)
  5654  			return nil, err
  5655  		},
  5656  	}
  5657  	if err := ss.Start(nil); err != nil {
  5658  		t.Fatalf("Error starting endpoint server: %v", err)
  5659  	}
  5660  	defer ss.Stop()
  5661  
  5662  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5663  
  5664  	wg.Add(1)
  5665  	go func() {
  5666  		if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
  5667  			t.Errorf("ss.Client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err)
  5668  		}
  5669  		wg.Done()
  5670  	}()
  5671  
  5672  	select {
  5673  	case <-called:
  5674  	case <-time.After(5 * time.Second):
  5675  		t.Fatalf("failed to perform EmptyCall after 10s")
  5676  	}
  5677  	cancel()
  5678  	select {
  5679  	case <-done:
  5680  	case <-time.After(5 * time.Second):
  5681  		t.Fatalf("server failed to close done chan due to cancellation propagation")
  5682  	}
  5683  	wg.Wait()
  5684  }
  5685  
  5686  // When an RPC is canceled, it's possible that the last Recv() returns before
  5687  // all call options' after are executed.
  5688  func (s) TestCanceledRPCCallOptionRace(t *testing.T) {
  5689  	ss := &stubserver.StubServer{
  5690  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  5691  			err := stream.Send(&testpb.StreamingOutputCallResponse{})
  5692  			if err != nil {
  5693  				return err
  5694  			}
  5695  			<-stream.Context().Done()
  5696  			return nil
  5697  		},
  5698  	}
  5699  	if err := ss.Start(nil); err != nil {
  5700  		t.Fatalf("Error starting endpoint server: %v", err)
  5701  	}
  5702  	defer ss.Stop()
  5703  
  5704  	const count = 1000
  5705  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5706  	defer cancel()
  5707  
  5708  	var wg sync.WaitGroup
  5709  	wg.Add(count)
  5710  	for i := 0; i < count; i++ {
  5711  		go func() {
  5712  			defer wg.Done()
  5713  			var p peer.Peer
  5714  			ctx, cancel := context.WithCancel(ctx)
  5715  			defer cancel()
  5716  			stream, err := ss.Client.FullDuplexCall(ctx, grpc.Peer(&p))
  5717  			if err != nil {
  5718  				t.Errorf("_.FullDuplexCall(_) = _, %v", err)
  5719  				return
  5720  			}
  5721  			if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil {
  5722  				t.Errorf("_ has error %v while sending", err)
  5723  				return
  5724  			}
  5725  			if _, err := stream.Recv(); err != nil {
  5726  				t.Errorf("%v.Recv() = %v", stream, err)
  5727  				return
  5728  			}
  5729  			cancel()
  5730  			if _, err := stream.Recv(); status.Code(err) != codes.Canceled {
  5731  				t.Errorf("%v compleled with error %v, want %s", stream, err, codes.Canceled)
  5732  				return
  5733  			}
  5734  			// If recv returns before call options are executed, peer.Addr is not set,
  5735  			// fail the test.
  5736  			if p.Addr == nil {
  5737  				t.Errorf("peer.Addr is nil, want non-nil")
  5738  				return
  5739  			}
  5740  		}()
  5741  	}
  5742  	wg.Wait()
  5743  }
  5744  
func (s) TestClientSettingsFloodCloseConn(t *testing.T) {
	// Tests that the server properly closes its transport if the client floods
	// settings frames and then closes the connection.

	// Minimize buffer sizes to stimulate failure condition more quickly.
	s := grpc.NewServer(grpc.WriteBufferSize(20))
	l := bufconn.Listen(20)
	go s.Serve(l)

	// Dial our server and handshake.
	conn, err := l.Dial()
	if err != nil {
		t.Fatalf("Error dialing bufconn: %v", err)
	}

	n, err := conn.Write([]byte(http2.ClientPreface))
	if err != nil || n != len(http2.ClientPreface) {
		t.Fatalf("Error writing client preface: %v, %v", n, err)
	}

	// Read the server's initial SETTINGS frame and ack it, completing the
	// client's half of the HTTP/2 handshake.
	fr := http2.NewFramer(conn, conn)
	f, err := fr.ReadFrame()
	if err != nil {
		t.Fatalf("Error reading initial settings frame: %v", err)
	}
	if _, ok := f.(*http2.SettingsFrame); ok {
		if err := fr.WriteSettingsAck(); err != nil {
			t.Fatalf("Error writing settings ack: %v", err)
		}
	} else {
		t.Fatalf("Error reading initial settings frame: type=%T", f)
	}

	// Confirm settings can be written, and that an ack is read.
	if err = fr.WriteSettings(); err != nil {
		t.Fatalf("Error writing settings frame: %v", err)
	}
	if f, err = fr.ReadFrame(); err != nil {
		t.Fatalf("Error reading frame: %v", err)
	}
	if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() {
		t.Fatalf("Unexpected frame: %v", f)
	}

	// Flood settings frames until a timeout occurs, indicating the server has
	// stopped reading from the connection, then close the conn.
	for {
		conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))
		if err := fr.WriteSettings(); err != nil {
			// Only a write timeout is expected here; any other error fails
			// the test.
			if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() {
				t.Fatalf("Received unexpected write error: %v", err)
			}
			break
		}
	}
	conn.Close()

	// If the server does not handle this situation correctly, it will never
	// close the transport.  This is because its loopyWriter.run() will have
	// exited, and thus not handle the goAway the draining process initiates.
	// Also, we would see a goroutine leak in this case, as the reader would be
	// blocked on the controlBuf's throttle() method indefinitely.

	timer := time.AfterFunc(5*time.Second, func() {
		t.Errorf("Timeout waiting for GracefulStop to return")
		s.Stop()
	})
	s.GracefulStop()
	timer.Stop()
}
  5815  
  5816  func unaryInterceptorVerifyConn(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
  5817  	conn := transport.GetConnection(ctx)
  5818  	if conn == nil {
  5819  		return nil, status.Error(codes.NotFound, "connection was not in context")
  5820  	}
  5821  	return nil, status.Error(codes.OK, "")
  5822  }
  5823  
  5824  // TestUnaryServerInterceptorGetsConnection tests whether the accepted conn on
  5825  // the server gets to any unary interceptors on the server side.
  5826  func (s) TestUnaryServerInterceptorGetsConnection(t *testing.T) {
  5827  	ss := &stubserver.StubServer{}
  5828  	if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyConn)}); err != nil {
  5829  		t.Fatalf("Error starting endpoint server: %v", err)
  5830  	}
  5831  	defer ss.Stop()
  5832  
  5833  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5834  	defer cancel()
  5835  
  5836  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK {
  5837  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK)
  5838  	}
  5839  }
  5840  
  5841  func streamingInterceptorVerifyConn(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  5842  	conn := transport.GetConnection(ss.Context())
  5843  	if conn == nil {
  5844  		return status.Error(codes.NotFound, "connection was not in context")
  5845  	}
  5846  	return status.Error(codes.OK, "")
  5847  }
  5848  
  5849  // TestStreamingServerInterceptorGetsConnection tests whether the accepted conn on
  5850  // the server gets to any streaming interceptors on the server side.
  5851  func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) {
  5852  	ss := &stubserver.StubServer{}
  5853  	if err := ss.Start([]grpc.ServerOption{grpc.StreamInterceptor(streamingInterceptorVerifyConn)}); err != nil {
  5854  		t.Fatalf("Error starting endpoint server: %v", err)
  5855  	}
  5856  	defer ss.Stop()
  5857  
  5858  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5859  	defer cancel()
  5860  
  5861  	s, err := ss.Client.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
  5862  	if err != nil {
  5863  		t.Fatalf("ss.Client.StreamingOutputCall(_) = _, %v, want _, <nil>", err)
  5864  	}
  5865  	if _, err := s.Recv(); err != io.EOF {
  5866  		t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF)
  5867  	}
  5868  }
  5869  
  5870  // unaryInterceptorVerifyAuthority verifies there is an unambiguous :authority
  5871  // once the request gets to an interceptor. An unambiguous :authority is defined
  5872  // as at most a single :authority header, and no host header according to A41.
  5873  func unaryInterceptorVerifyAuthority(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
  5874  	md, ok := metadata.FromIncomingContext(ctx)
  5875  	if !ok {
  5876  		return nil, status.Error(codes.NotFound, "metadata was not in context")
  5877  	}
  5878  	authority := md.Get(":authority")
  5879  	if len(authority) > 1 { // Should be an unambiguous authority by the time it gets to interceptor.
  5880  		return nil, status.Error(codes.NotFound, ":authority value had more than one value")
  5881  	}
  5882  	// Host header shouldn't be present by the time it gets to the interceptor
  5883  	// level (should either be renamed to :authority or explicitly deleted).
  5884  	host := md.Get("host")
  5885  	if len(host) != 0 {
  5886  		return nil, status.Error(codes.NotFound, "host header should not be present in metadata")
  5887  	}
  5888  	// Pass back the authority for verification on client - NotFound so
  5889  	// grpc-message will be available to read for verification.
  5890  	if len(authority) == 0 {
  5891  		// Represent no :authority header present with an empty string.
  5892  		return nil, status.Error(codes.NotFound, "")
  5893  	}
  5894  	return nil, status.Error(codes.NotFound, authority[0])
  5895  }
  5896  
// TestAuthorityHeader tests that the eventual :authority that reaches the grpc
// layer is unambiguous due to logic added in A41.
func (s) TestAuthorityHeader(t *testing.T) {
	tests := []struct {
		name          string
		headers       []string
		wantAuthority string
	}{
		// "If :authority is missing, Host must be renamed to :authority." - A41
		{
			name: "Missing :authority",
			// Codepath triggered by incoming headers with no authority but with
			// a host.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				"content-type", "application/grpc",
				"te", "trailers",
				"host", "localhost",
			},
			wantAuthority: "localhost",
		},
		{
			name: "Missing :authority and host",
			// Codepath triggered by incoming headers with no :authority and no
			// host.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				"content-type", "application/grpc",
				"te", "trailers",
			},
			wantAuthority: "",
		},
		// "If :authority is present, Host must be discarded." - A41
		{
			name: ":authority and host present",
			// Codepath triggered by incoming headers with both an authority
			// header and a host header.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				":authority", "localhost",
				"content-type", "application/grpc",
				"host", "localhost2",
			},
			wantAuthority: "localhost",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			te := newTest(t, tcpClearRREnv)
			ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
				return &testpb.SimpleResponse{}, nil
			}}
			// The interceptor reports the :authority it observed back to the
			// client as the message of a NotFound status.
			te.unaryServerInt = unaryInterceptorVerifyAuthority
			te.startServer(ts)
			defer te.tearDown()
			success := testutils.NewChannel()
			te.withServerTester(func(st *serverTester) {
				st.writeHeaders(http2.HeadersFrameParam{
					StreamID:      1,
					BlockFragment: st.encodeHeader(test.headers...),
					EndStream:     false,
					EndHeaders:    true,
				})
				// Empty gRPC message: 5-byte length prefix with zero payload
				// length, sent with END_STREAM set.
				st.writeData(1, true, []byte{0, 0, 0, 0, 0})

				// Scan server frames until a headers frame carrying
				// grpc-message arrives; its value is the authority (or an
				// error description) reported by the interceptor.
				for {
					frame := st.wantAnyFrame()
					f, ok := frame.(*http2.MetaHeadersFrame)
					if !ok {
						continue
					}
					for _, header := range f.Fields {
						if header.Name == "grpc-message" {
							success.Send(header.Value)
							return
						}
					}
				}
			})

			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			gotAuthority, err := success.Receive(ctx)
			if err != nil {
				t.Fatalf("Error receiving from channel: %v", err)
			}
			if gotAuthority != test.wantAuthority {
				t.Fatalf("gotAuthority: %v, wantAuthority %v", gotAuthority, test.wantAuthority)
			}
		})
	}
}
  5992  
// wrapCloseListener tracks Accepts/Closes and maintains a counter of the
// number of open connections. connsOpen must be accessed atomically.
type wrapCloseListener struct {
	net.Listener
	connsOpen int32
}
  5999  
// wrapCloseConn is returned by wrapCloseListener.Accept and decrements its
// listener's connsOpen exactly once when Close is called.
type wrapCloseConn struct {
	net.Conn
	lis       *wrapCloseListener
	closeOnce sync.Once
}
  6007  
  6008  func (w *wrapCloseListener) Accept() (net.Conn, error) {
  6009  	conn, err := w.Listener.Accept()
  6010  	if err != nil {
  6011  		return nil, err
  6012  	}
  6013  	atomic.AddInt32(&w.connsOpen, 1)
  6014  	return &wrapCloseConn{Conn: conn, lis: w}, nil
  6015  }
  6016  
  6017  func (w *wrapCloseConn) Close() error {
  6018  	defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) })
  6019  	return w.Conn.Close()
  6020  }
  6021  
// TestServerClosesConn ensures the server always closes accepted conns, even
// if the client doesn't complete the HTTP/2 handshake.
func (s) TestServerClosesConn(t *testing.T) {
	lis := bufconn.Listen(20)
	wrapLis := &wrapCloseListener{Listener: lis}

	s := grpc.NewServer()
	go s.Serve(wrapLis)
	defer s.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// Open raw connections that never speak HTTP/2, then close them.
	for i := 0; i < 10; i++ {
		conn, err := lis.DialContext(ctx)
		if err != nil {
			t.Fatalf("Dial = _, %v; want _, nil", err)
		}
		conn.Close()
	}
	// Poll until the server has closed its side of every conn, or time out.
	for ctx.Err() == nil {
		if atomic.LoadInt32(&wrapLis.connsOpen) == 0 {
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen))
}
  6050  
  6051  // TestNilStatsHandler ensures we do not panic as a result of a nil stats
  6052  // handler.
  6053  func (s) TestNilStatsHandler(t *testing.T) {
  6054  	grpctest.TLogger.ExpectErrorN("ignoring nil parameter", 2)
  6055  	ss := &stubserver.StubServer{
  6056  		UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6057  			return &testpb.SimpleResponse{}, nil
  6058  		},
  6059  	}
  6060  	if err := ss.Start([]grpc.ServerOption{grpc.StatsHandler(nil)}, grpc.WithStatsHandler(nil)); err != nil {
  6061  		t.Fatalf("Error starting endpoint server: %v", err)
  6062  	}
  6063  	defer ss.Stop()
  6064  
  6065  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6066  	defer cancel()
  6067  	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6068  		t.Fatalf("Unexpected error from UnaryCall: %v", err)
  6069  	}
  6070  }
  6071  
// TestUnexpectedEOF tests a scenario where a client invokes two unary RPC
// calls. The first call receives a payload which exceeds max grpc receive
// message length, and the second gets a large response. This second RPC should
// not fail with an unexpected EOF.
func (s) TestUnexpectedEOF(t *testing.T) {
	ss := &stubserver.StubServer{
		UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
			// Echo back a payload of the requested size.
			return &testpb.SimpleResponse{
				Payload: &testpb.Payload{
					Body: bytes.Repeat([]byte("a"), int(in.ResponseSize)),
				},
			}, nil
		},
	}
	if err := ss.Start([]grpc.ServerOption{}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Repeat the pair of RPCs to exercise the failure scenario multiple times.
	for i := 0; i < 10; i++ {
		// exceeds grpc.DefaultMaxRecvMessageSize, this should error with
		// RESOURCE_EXHAUSTED error.
		_, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 4194304})
		if code := status.Code(err); code != codes.ResourceExhausted {
			t.Fatalf("UnaryCall RPC returned error: %v, want status code %v", err, codes.ResourceExhausted)
		}
		// Larger response that doesn't exceed DefaultMaxRecvMessageSize, this
		// should work normally.
		if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 275075}); err != nil {
			t.Fatalf("UnaryCall RPC failed: %v", err)
		}
	}
}
  6107  
// TestRecvWhileReturningStatus performs a Recv in a service handler while the
// handler returns its status.  A race condition could result in the server
// sending the first headers frame without the HTTP :status header.  This can
// happen when the failed Recv (due to the handler returning) and the handler's
// status both attempt to write the status, which would be the first headers
// frame sent, simultaneously.
func (s) TestRecvWhileReturningStatus(t *testing.T) {
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
			// The client never sends, so this Recv blocks until the server
			// returns and causes stream operations to return errors.
			go stream.Recv()
			return nil
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Repeat many times to give the race a chance to occur.
	for i := 0; i < 100; i++ {
		stream, err := ss.Client.FullDuplexCall(ctx)
		if err != nil {
			t.Fatalf("Error while creating stream: %v", err)
		}
		if _, err := stream.Recv(); err != io.EOF {
			t.Fatalf("stream.Recv() = %v, want io.EOF", err)
		}
	}
}
  6139  
// mockBinaryLogger is a binarylog.Logger that hands out a single shared
// mockMethodLogger so tests can count logged events.
type mockBinaryLogger struct {
	mml *mockMethodLogger
}
  6143  
  6144  func newMockBinaryLogger() *mockBinaryLogger {
  6145  	return &mockBinaryLogger{
  6146  		mml: &mockMethodLogger{},
  6147  	}
  6148  }
  6149  
// GetMethodLogger returns the same shared mockMethodLogger for every method.
func (mbl *mockBinaryLogger) GetMethodLogger(string) binarylog.MethodLogger {
	return mbl.mml
}
  6153  
// mockMethodLogger counts Log calls. events must be accessed atomically.
type mockMethodLogger struct {
	events uint64
}
  6157  
// Log atomically counts the event; the entry contents are ignored.
func (mml *mockMethodLogger) Log(context.Context, binarylog.LogEntryConfig) {
	atomic.AddUint64(&mml.events, 1)
}
  6161  
  6162  // TestGlobalBinaryLoggingOptions tests the binary logging options for client
  6163  // and server side. The test configures a binary logger to be plumbed into every
  6164  // created ClientConn and server. It then makes a unary RPC call, and a
  6165  // streaming RPC call. A certain amount of logging calls should happen as a
  6166  // result of the stream operations on each of these calls.
  6167  func (s) TestGlobalBinaryLoggingOptions(t *testing.T) {
  6168  	csbl := newMockBinaryLogger()
  6169  	ssbl := newMockBinaryLogger()
  6170  
  6171  	internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(csbl))
  6172  	internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(ssbl))
  6173  	defer func() {
  6174  		internal.ClearGlobalDialOptions()
  6175  		internal.ClearGlobalServerOptions()
  6176  	}()
  6177  	ss := &stubserver.StubServer{
  6178  		UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6179  			return &testpb.SimpleResponse{}, nil
  6180  		},
  6181  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  6182  			_, err := stream.Recv()
  6183  			if err == io.EOF {
  6184  				return nil
  6185  			}
  6186  			return status.Errorf(codes.Unknown, "expected client to call CloseSend")
  6187  		},
  6188  	}
  6189  
  6190  	// No client or server options specified, because should pick up configured
  6191  	// global options.
  6192  	if err := ss.Start(nil); err != nil {
  6193  		t.Fatalf("Error starting endpoint server: %v", err)
  6194  	}
  6195  	defer ss.Stop()
  6196  
  6197  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6198  	defer cancel()
  6199  	// Make a Unary RPC. This should cause Log calls on the MethodLogger.
  6200  	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6201  		t.Fatalf("Unexpected error from UnaryCall: %v", err)
  6202  	}
  6203  	if csbl.mml.events != 5 {
  6204  		t.Fatalf("want 5 client side binary logging events, got %v", csbl.mml.events)
  6205  	}
  6206  	if ssbl.mml.events != 5 {
  6207  		t.Fatalf("want 5 server side binary logging events, got %v", ssbl.mml.events)
  6208  	}
  6209  
  6210  	// Make a streaming RPC. This should cause Log calls on the MethodLogger.
  6211  	stream, err := ss.Client.FullDuplexCall(ctx)
  6212  	if err != nil {
  6213  		t.Fatalf("ss.Client.FullDuplexCall failed: %f", err)
  6214  	}
  6215  
  6216  	stream.CloseSend()
  6217  	if _, err = stream.Recv(); err != io.EOF {
  6218  		t.Fatalf("unexpected error: %v, expected an EOF error", err)
  6219  	}
  6220  
  6221  	if csbl.mml.events != 8 {
  6222  		t.Fatalf("want 8 client side binary logging events, got %v", csbl.mml.events)
  6223  	}
  6224  	if ssbl.mml.events != 8 {
  6225  		t.Fatalf("want 8 server side binary logging events, got %v", ssbl.mml.events)
  6226  	}
  6227  }
  6228  
// statsHandlerRecordEvents is a stats.Handler that records every RPC stats
// event it receives. mu guards s.
type statsHandlerRecordEvents struct {
	mu sync.Mutex
	s  []stats.RPCStats
}
  6233  
// TagRPC implements stats.Handler; it attaches nothing to the context.
func (*statsHandlerRecordEvents) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
// HandleRPC appends the stats event to h.s under the lock.
func (h *statsHandlerRecordEvents) HandleRPC(_ context.Context, s stats.RPCStats) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.s = append(h.s, s)
}
// TagConn implements stats.Handler; it attaches nothing to the context.
func (*statsHandlerRecordEvents) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
// HandleConn implements stats.Handler as a no-op.
func (*statsHandlerRecordEvents) HandleConn(context.Context, stats.ConnStats) {}
  6246  
// triggerRPCBlockPicker invokes pickDone on every Pick and reports no SubConn
// available, forcing RPCs to block until a new picker is supplied.
type triggerRPCBlockPicker struct {
	pickDone func()
}
  6250  
// Pick notifies pickDone and returns ErrNoSubConnAvailable so the RPC blocks
// waiting for the next picker update.
func (bp *triggerRPCBlockPicker) Pick(pi balancer.PickInfo) (balancer.PickResult, error) {
	bp.pickDone()
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
  6255  
// name is the LB policy name used for triggerRPCBlockPickerBalancerBuilder in
// service config.
const name = "triggerRPCBlockBalancer"
  6257  
// triggerRPCBlockPickerBalancerBuilder builds triggerRPCBlockBalancer
// instances wrapping a round robin child.
type triggerRPCBlockPickerBalancerBuilder struct{}
  6259  
  6260  func (triggerRPCBlockPickerBalancerBuilder) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
  6261  	b := &triggerRPCBlockBalancer{
  6262  		blockingPickerDone: grpcsync.NewEvent(),
  6263  		ClientConn:         cc,
  6264  	}
  6265  	// round_robin child to complete balancer tree with a usable leaf policy and
  6266  	// have RPCs actually work.
  6267  	builder := balancer.Get(roundrobin.Name)
  6268  	rr := builder.Build(b, bOpts)
  6269  	if rr == nil {
  6270  		panic("round robin builder returned nil")
  6271  	}
  6272  	b.Balancer = rr
  6273  	return b
  6274  }
  6275  
// ParseConfig returns an empty config; this balancer takes no options.
func (triggerRPCBlockPickerBalancerBuilder) ParseConfig(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	return &bpbConfig{}, nil
}
  6279  
// Name returns the LB policy name this builder registers under.
func (triggerRPCBlockPickerBalancerBuilder) Name() string {
	return name
}
  6283  
// bpbConfig is the (empty) parsed service config for the
// triggerRPCBlockBalancer policy.
type bpbConfig struct {
	serviceconfig.LoadBalancingConfig
}
  6287  
// triggerRPCBlockBalancer uses a child RR balancer, but blocks all UpdateState
// calls until the first Pick call. That first Pick returns
// ErrNoSubConnAvailable to make the RPC block and trigger the appropriate stats
// handler callout. After the first Pick call, it will forward at least one
// READY picker update from the child, causing RPCs to proceed as normal using a
// round robin balancer's picker if it updates with a READY picker.
type triggerRPCBlockBalancer struct {
	stateMu    sync.Mutex // guards childState and serializes forwarding decisions
	childState balancer.State

	blockingPickerDone *grpcsync.Event
	// embed a ClientConn to wrap only UpdateState() operation
	balancer.ClientConn
	// embed a Balancer to wrap only UpdateClientConnState() operation
	balancer.Balancer
}
  6304  
// UpdateClientConnState forwards the update to the child balancer and then
// installs a CONNECTING picker that blocks RPCs (via ErrNoSubConnAvailable)
// until its first Pick call fires blockingPickerDone.
func (bpb *triggerRPCBlockBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	err := bpb.Balancer.UpdateClientConnState(s)
	bpb.ClientConn.UpdateState(balancer.State{
		ConnectivityState: connectivity.Connecting,
		Picker: &triggerRPCBlockPicker{
			pickDone: func() {
				bpb.stateMu.Lock()
				defer bpb.stateMu.Unlock()
				bpb.blockingPickerDone.Fire()
				// If the child already reported READY, forward that state now
				// so the blocked RPC can proceed.
				if bpb.childState.ConnectivityState == connectivity.Ready {
					bpb.ClientConn.UpdateState(bpb.childState)
				}
			},
		},
	})
	return err
}
  6322  
// UpdateState caches the child's state and, once the blocking picker has run,
// forwards only READY pickers so the number of picker updates seen by RPCs
// stays deterministic.
func (bpb *triggerRPCBlockBalancer) UpdateState(state balancer.State) {
	bpb.stateMu.Lock()
	defer bpb.stateMu.Unlock()
	bpb.childState = state
	if bpb.blockingPickerDone.HasFired() { // guard first one to get a picker sending ErrNoSubConnAvailable first
		if state.ConnectivityState == connectivity.Ready {
			bpb.ClientConn.UpdateState(state) // after the first rr picker update, only forward once READY for deterministic picker counts
		}
	}
}
  6333  
  6334  // TestRPCBlockingOnPickerStatsCall tests the emission of a stats handler call
  6335  // that represents the RPC had to block waiting for a new picker due to
  6336  // ErrNoSubConnAvailable being returned from the first picker call.
  6337  func (s) TestRPCBlockingOnPickerStatsCall(t *testing.T) {
  6338  	sh := &statsHandlerRecordEvents{}
  6339  	ss := &stubserver.StubServer{
  6340  		UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6341  			return &testpb.SimpleResponse{}, nil
  6342  		},
  6343  	}
  6344  
  6345  	if err := ss.StartServer(); err != nil {
  6346  		t.Fatalf("Error starting endpoint server: %v", err)
  6347  	}
  6348  	defer ss.Stop()
  6349  
  6350  	lbCfgJSON := `{
  6351    		"loadBalancingConfig": [
  6352      		{
  6353        			"triggerRPCBlockBalancer": {}
  6354      		}
  6355  		]
  6356  	}`
  6357  
  6358  	sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(lbCfgJSON)
  6359  	mr := manual.NewBuilderWithScheme("pickerupdatedbalancer")
  6360  	defer mr.Close()
  6361  	mr.InitialState(resolver.State{
  6362  		Addresses: []resolver.Address{
  6363  			{Addr: ss.Address},
  6364  		},
  6365  		ServiceConfig: sc,
  6366  	})
  6367  
  6368  	cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithStatsHandler(sh), grpc.WithTransportCredentials(insecure.NewCredentials()))
  6369  	if err != nil {
  6370  		t.Fatalf("grpc.Dial() failed: %v", err)
  6371  	}
  6372  	defer cc.Close()
  6373  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6374  	defer cancel()
  6375  	testServiceClient := testgrpc.NewTestServiceClient(cc)
  6376  	if _, err := testServiceClient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6377  		t.Fatalf("Unexpected error from UnaryCall: %v", err)
  6378  	}
  6379  
  6380  	var pickerUpdatedCount uint
  6381  	for _, stat := range sh.s {
  6382  		if _, ok := stat.(*stats.PickerUpdated); ok {
  6383  			pickerUpdatedCount++
  6384  		}
  6385  	}
  6386  	if pickerUpdatedCount != 1 {
  6387  		t.Fatalf("sh.pickerUpdated count: %v, want: %v", pickerUpdatedCount, 2)
  6388  	}
  6389  }