gitee.com/zhaochuninhefei/gmgo@v0.0.31-0.20240209061119-069254a02979/grpc/test/end2end_test.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package test
    20  
    21  import (
    22  	"bufio"
    23  	"bytes"
    24  	"compress/gzip"
    25  	"context"
    26  	"errors"
    27  	"flag"
    28  	"fmt"
    29  	"gitee.com/zhaochuninhefei/gmgo/grpc/credentials/insecure"
    30  	"io"
    31  	"math"
    32  	"net"
    33  	"os"
    34  	"reflect"
    35  	"runtime"
    36  	"strings"
    37  	"sync"
    38  	"sync/atomic"
    39  	"syscall"
    40  	"testing"
    41  	"time"
    42  
    43  	http "gitee.com/zhaochuninhefei/gmgo/gmhttp"
    44  	tls "gitee.com/zhaochuninhefei/gmgo/gmtls"
    45  	"gitee.com/zhaochuninhefei/gmgo/grpc"
    46  	"gitee.com/zhaochuninhefei/gmgo/grpc/codes"
    47  	"gitee.com/zhaochuninhefei/gmgo/grpc/connectivity"
    48  	"gitee.com/zhaochuninhefei/gmgo/grpc/credentials"
    49  	"gitee.com/zhaochuninhefei/gmgo/grpc/encoding"
    50  	_ "gitee.com/zhaochuninhefei/gmgo/grpc/encoding/gzip"
    51  	"gitee.com/zhaochuninhefei/gmgo/grpc/health"
    52  	healthgrpc "gitee.com/zhaochuninhefei/gmgo/grpc/health/grpc_health_v1"
    53  	healthpb "gitee.com/zhaochuninhefei/gmgo/grpc/health/grpc_health_v1"
    54  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal"
    55  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/channelz"
    56  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpcsync"
    57  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/grpctest"
    58  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/stubserver"
    59  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/testutils"
    60  	"gitee.com/zhaochuninhefei/gmgo/grpc/internal/transport"
    61  	"gitee.com/zhaochuninhefei/gmgo/grpc/keepalive"
    62  	"gitee.com/zhaochuninhefei/gmgo/grpc/metadata"
    63  	"gitee.com/zhaochuninhefei/gmgo/grpc/peer"
    64  	"gitee.com/zhaochuninhefei/gmgo/grpc/resolver"
    65  	"gitee.com/zhaochuninhefei/gmgo/grpc/resolver/manual"
    66  	"gitee.com/zhaochuninhefei/gmgo/grpc/serviceconfig"
    67  	"gitee.com/zhaochuninhefei/gmgo/grpc/stats"
    68  	"gitee.com/zhaochuninhefei/gmgo/grpc/status"
    69  	"gitee.com/zhaochuninhefei/gmgo/grpc/tap"
    70  	"gitee.com/zhaochuninhefei/gmgo/grpc/test/bufconn"
    71  	testpb "gitee.com/zhaochuninhefei/gmgo/grpc/test/grpc_testing"
    72  	"gitee.com/zhaochuninhefei/gmgo/grpc/testdata"
    73  	"gitee.com/zhaochuninhefei/gmgo/net/http2"
    74  	"gitee.com/zhaochuninhefei/gmgo/net/http2/hpack"
    75  	"github.com/golang/protobuf/proto"
    76  	anypb "github.com/golang/protobuf/ptypes/any"
    77  	spb "google.golang.org/genproto/googleapis/rpc/status"
    78  )
    79  
// defaultHealthService is the fully-qualified name of the standard gRPC
// health-checking service registered by the health server.
const defaultHealthService = "grpc.health.v1.Health"

func init() {
	// Turn channelz on for the whole test binary so tests can inspect
	// channel and server state through the channelz service.
	channelz.TurnOn()
}
    85  
// s hooks this package's test functions into the grpctest harness by
// embedding its Tester, which provides per-test setup/teardown.
type s struct {
	grpctest.Tester
}

// Test is the single top-level entry point; grpctest discovers and runs
// every Test* method defined on s as a subtest.
func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}
    93  
var (
	// For headers:
	testMetadata = metadata.MD{
		"key1":     []string{"value1"},
		"key2":     []string{"value2"},
		"key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})},
	}
	testMetadata2 = metadata.MD{
		"key1": []string{"value12"},
		"key2": []string{"value22"},
	}
	// For trailers:
	testTrailerMetadata = metadata.MD{
		"tkey1":     []string{"trailerValue1"},
		"tkey2":     []string{"trailerValue2"},
		"tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})},
	}
	testTrailerMetadata2 = metadata.MD{
		"tkey1": []string{"trailerValue12"},
		"tkey2": []string{"trailerValue22"},
	}
	// capital "Key" is illegal in HTTP/2.
	malformedHTTP2Metadata = metadata.MD{
		"Key": []string{"foo"},
	}
	// testAppUA is a benign client user-agent string; failAppUA is the
	// sentinel user-agent prefix that makes the test handlers fail the RPC
	// on purpose (see testServer.EmptyCall / StreamingOutputCall).
	testAppUA = "myApp1/1.0 myApp2/0.9"
	failAppUA = "fail-this-RPC"
	// detailedError is the rich status (code + message + details) returned
	// when the failAppUA sentinel is seen, so clients can assert that full
	// error detail survives the round trip.
	detailedError = status.ErrorProto(&spb.Status{
		Code:    int32(codes.DataLoss),
		Message: "error for testing: " + failAppUA,
		Details: []*anypb.Any{{
			TypeUrl: "url",
			Value:   []byte{6, 0, 0, 6, 1, 3},
		}},
	})
)

var raceMode bool // set by race.go in race mode
   132  
// testServer is the TestService implementation used by these tests. Its
// knob fields let individual tests steer handler behavior (forced errors,
// header/trailer sequencing, artificial delay) without defining a new
// service implementation per test.
type testServer struct {
	testpb.UnimplementedTestServiceServer

	security           string // indicate the authentication protocol used by this server.
	earlyFail          bool   // whether to error out the execution of a service handler prematurely.
	setAndSendHeader   bool   // whether to call setHeader and sendHeader.
	setHeaderOnly      bool   // whether to only call setHeader, not sendHeader.
	multipleSetTrailer bool   // whether to call setTrailer multiple times.
	unaryCallSleepTime time.Duration // artificial delay inserted into UnaryCall.
}
   143  
   144  //goland:noinspection GoUnusedParameter
   145  func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
   146  	if md, ok := metadata.FromIncomingContext(ctx); ok {
   147  		// For testing purpose, returns an error if user-agent is failAppUA.
   148  		// To test that client gets the correct error.
   149  		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
   150  			return nil, detailedError
   151  		}
   152  		var str []string
   153  		for _, entry := range md["user-agent"] {
   154  			str = append(str, "ua", entry)
   155  		}
   156  		_ = grpc.SendHeader(ctx, metadata.Pairs(str...))
   157  	}
   158  	return new(testpb.Empty), nil
   159  }
   160  
   161  func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
   162  	if size < 0 {
   163  		return nil, fmt.Errorf("requested a response with invalid length %d", size)
   164  	}
   165  	body := make([]byte, size)
   166  	switch t {
   167  	case testpb.PayloadType_COMPRESSABLE:
   168  	case testpb.PayloadType_UNCOMPRESSABLE:
   169  		return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported")
   170  	default:
   171  		return nil, fmt.Errorf("unsupported payload type: %d", t)
   172  	}
   173  	return &testpb.Payload{
   174  		Type: t,
   175  		Body: body,
   176  	}, nil
   177  }
   178  
// UnaryCall validates incoming metadata, exercises whichever header/trailer
// knobs the test configured (set-then-send, set-only, or plain send),
// verifies peer and TLS auth info when s.security is set, sleeps for
// unaryCallSleepTime, and finally returns a payload of the requested type
// and size. Errors are returned with codes that let clients pinpoint the
// failing check.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		// Every gRPC request must carry :authority; its absence indicates a
		// transport-level problem.
		if _, exists := md[":authority"]; !exists {
			return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		// Exercise the header-sending knobs. Exactly one of the three
		// branches runs, chosen by the test via testServer fields.
		if s.setAndSendHeader {
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SendHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SetHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else {
			if err := grpc.SendHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
			}
		}
		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
		}
		// Optionally set trailers a second time to verify they accumulate.
		if s.multipleSetTrailer {
			if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata2, err)
			}
		}
	}
	pr, ok := peer.FromContext(ctx)
	if !ok {
		return nil, status.Error(codes.DataLoss, "failed to get peer from ctx")
	}
	if pr.Addr == net.Addr(nil) {
		return nil, status.Error(codes.DataLoss, "failed to get peer address")
	}
	if s.security != "" {
		// Check Auth info: the peer must present TLS credentials whose auth
		// type matches the configured security protocol and whose SNI server
		// name matches the test certificate.
		var authType, serverName string
		switch info := pr.AuthInfo.(type) {
		case credentials.TLSInfo:
			authType = info.AuthType()
			serverName = info.State.ServerName
		default:
			return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type")
		}
		if authType != s.security {
			return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security)
		}
		if serverName != "x.test.example.com" {
			return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName)
		}
	}
	// Simulate some service delay.
	time.Sleep(s.unaryCallSleepTime)

	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
	if err != nil {
		return nil, err
	}

	return &testpb.SimpleResponse{
		Payload: payload,
	}, nil
}
   249  
   250  func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
   251  	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
   252  		if _, exists := md[":authority"]; !exists {
   253  			return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
   254  		}
   255  		// For testing purpose, returns an error if user-agent is failAppUA.
   256  		// To test that client gets the correct error.
   257  		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
   258  			return status.Error(codes.DataLoss, "error for testing: "+failAppUA)
   259  		}
   260  	}
   261  	cs := args.GetResponseParameters()
   262  	for _, c := range cs {
   263  		if us := c.GetIntervalUs(); us > 0 {
   264  			time.Sleep(time.Duration(us) * time.Microsecond)
   265  		}
   266  
   267  		payload, err := newPayload(args.GetResponseType(), c.GetSize())
   268  		if err != nil {
   269  			return err
   270  		}
   271  
   272  		if err := stream.Send(&testpb.StreamingOutputCallResponse{
   273  			Payload: payload,
   274  		}); err != nil {
   275  			return err
   276  		}
   277  	}
   278  	return nil
   279  }
   280  
   281  func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
   282  	var sum int
   283  	for {
   284  		in, err := stream.Recv()
   285  		if err == io.EOF {
   286  			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
   287  				AggregatedPayloadSize: int32(sum),
   288  			})
   289  		}
   290  		if err != nil {
   291  			return err
   292  		}
   293  		p := in.GetPayload().GetBody()
   294  		sum += len(p)
   295  		if s.earlyFail {
   296  			return status.Error(codes.NotFound, "not found")
   297  		}
   298  	}
   299  }
   300  
// FullDuplexCall first exercises the configured header/trailer knobs (same
// three-way choice as UnaryCall, but via the stream API), then echoes: for
// each request received it sends one response per response-parameter entry,
// honoring per-entry size and delay. ResourceExhausted errors from
// Recv/Send are remapped to Internal to support
// testSvrWriteStatusEarlyWrite.
func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		// Exactly one of the three header branches runs, chosen by the test.
		if s.setAndSendHeader {
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SendHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SetHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else {
			if err := stream.SendHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
			}
		}
		stream.SetTrailer(testTrailerMetadata)
		// Optionally set trailers twice to verify they accumulate.
		if s.multipleSetTrailer {
			stream.SetTrailer(testTrailerMetadata2)
		}
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			// to facilitate testSvrWriteStatusEarlyWrite
			if status.Code(err) == codes.ResourceExhausted {
				return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
			}
			return err
		}
		cs := in.GetResponseParameters()
		for _, c := range cs {
			// Optional pacing between responses, in microseconds.
			if us := c.GetIntervalUs(); us > 0 {
				time.Sleep(time.Duration(us) * time.Microsecond)
			}

			payload, err := newPayload(in.GetResponseType(), c.GetSize())
			if err != nil {
				return err
			}

			if err := stream.Send(&testpb.StreamingOutputCallResponse{
				Payload: payload,
			}); err != nil {
				// to facilitate testSvrWriteStatusEarlyWrite
				if status.Code(err) == codes.ResourceExhausted {
					return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
				}
				return err
			}
		}
	}
}
   364  
   365  func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error {
   366  	var msgBuf []*testpb.StreamingOutputCallRequest
   367  	for {
   368  		in, err := stream.Recv()
   369  		if err == io.EOF {
   370  			// read done.
   371  			break
   372  		}
   373  		if err != nil {
   374  			return err
   375  		}
   376  		msgBuf = append(msgBuf, in)
   377  	}
   378  	for _, m := range msgBuf {
   379  		cs := m.GetResponseParameters()
   380  		for _, c := range cs {
   381  			if us := c.GetIntervalUs(); us > 0 {
   382  				time.Sleep(time.Duration(us) * time.Microsecond)
   383  			}
   384  
   385  			payload, err := newPayload(m.GetResponseType(), c.GetSize())
   386  			if err != nil {
   387  				return err
   388  			}
   389  
   390  			if err := stream.Send(&testpb.StreamingOutputCallResponse{
   391  				Payload: payload,
   392  			}); err != nil {
   393  				return err
   394  			}
   395  		}
   396  	}
   397  	return nil
   398  }
   399  
// env describes one test environment: the transport network, security
// protocol, balancer, and optional dialer under which the end-to-end tests
// are repeated. See allEnv for the concrete set.
type env struct {
	name         string
	network      string // The type of network such as tcp, unix, etc.
	security     string // The security protocol such as TLS, SSH, etc.
	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
	balancer     string // One of "round_robin", "pick_first", or "".
	customDialer func(string, string, time.Duration) (net.Conn, error) // overrides net.DialTimeout when set.
}
   408  
   409  func (e env) runnable() bool {
   410  	if runtime.GOOS == "windows" && e.network == "unix" {
   411  		return false
   412  	}
   413  	return true
   414  }
   415  
   416  func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
   417  	if e.customDialer != nil {
   418  		return e.customDialer(e.network, addr, timeout)
   419  	}
   420  	return net.DialTimeout(e.network, addr, timeout)
   421  }
   422  
// The concrete environments the end-to-end tests iterate over. The
// "-v1-balancer" / "no-balancer" variants leave env.balancer empty;
// handlerEnv routes gRPC through the http.Handler server transport.
var (
	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp"}
	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls"}
	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
	noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"}
	allEnv        = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv}
)

// onlyEnv optionally restricts the run to a single named environment.
var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.")
   434  
   435  func listTestEnv() (envs []env) {
   436  	if *onlyEnv != "" {
   437  		for _, e := range allEnv {
   438  			if e.name == *onlyEnv {
   439  				if !e.runnable() {
   440  					panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS))
   441  				}
   442  				return []env{e}
   443  			}
   444  		}
   445  		panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv))
   446  	}
   447  	for _, e := range allEnv {
   448  		if e.runnable() {
   449  			envs = append(envs, e)
   450  		}
   451  	}
   452  	return envs
   453  }
   454  
// test is an end-to-end test. It should be created with the newTest
// func, modified as needed, and then started with its startServer method.
// It should be cleaned up with the tearDown method.
type test struct {
	// The following are setup in newTest().
	t      *testing.T
	e      env
	ctx    context.Context // valid for life of test, before tearDown
	cancel context.CancelFunc

	// The following knobs are for the server-side, and should be set after
	// calling newTest() and before calling startServer().

	// whether or not to expose the server's health via the default health
	// service implementation.
	enableHealthServer bool
	// In almost all cases, one should set the 'enableHealthServer' flag above to
	// expose the server's health using the default health service
	// implementation. This should only be used when a non-default health service
	// implementation is required.
	healthServer            healthpb.HealthServer
	maxStream               uint32
	tapHandle               tap.ServerInHandle
	maxServerMsgSize        *int
	maxServerReceiveMsgSize *int
	maxServerSendMsgSize    *int
	maxServerHeaderListSize *uint32
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	serverCompression           bool
	unknownHandler              grpc.StreamHandler
	unaryServerInt              grpc.UnaryServerInterceptor
	streamServerInt             grpc.StreamServerInterceptor
	serverInitialWindowSize     int32
	serverInitialConnWindowSize int32
	customServerOptions         []grpc.ServerOption

	// The following knobs are for the client-side, and should be set after
	// calling newTest() and before calling clientConn().
	maxClientMsgSize        *int
	maxClientReceiveMsgSize *int
	maxClientSendMsgSize    *int
	maxClientHeaderListSize *uint32
	userAgent               string
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	clientCompression bool
	// Used to test the new compressor registration API UseCompressor.
	clientUseCompression bool
	// clientNopCompression is set to create a compressor whose type is not supported.
	clientNopCompression        bool
	unaryClientInt              grpc.UnaryClientInterceptor
	streamClientInt             grpc.StreamClientInterceptor
	sc                          <-chan grpc.ServiceConfig
	customCodec                 encoding.Codec
	clientInitialWindowSize     int32
	clientInitialConnWindowSize int32
	perRPCCreds                 credentials.PerRPCCredentials
	customDialOptions           []grpc.DialOption
	resolverScheme              string

	// These are set once startServer is called. The common case is to have
	// only one testServer.
	srv     stopper
	hSrv    healthpb.HealthServer
	srvAddr string

	// These are set once startServers is called.
	srvs     []stopper
	hSrvs    []healthpb.HealthServer
	srvAddrs []string

	cc          *grpc.ClientConn // nil until requested via clientConn
	restoreLogs func()           // nil unless declareLogNoise is used
}
   528  
// stopper abstracts the stop methods shared by *grpc.Server and wrapHS so
// tearDown can shut either kind of server down uniformly.
type stopper interface {
	Stop()
	GracefulStop()
}
   533  
   534  func (te *test) tearDown() {
   535  	if te.cancel != nil {
   536  		te.cancel()
   537  		te.cancel = nil
   538  	}
   539  
   540  	if te.cc != nil {
   541  		_ = te.cc.Close()
   542  		te.cc = nil
   543  	}
   544  
   545  	if te.restoreLogs != nil {
   546  		te.restoreLogs()
   547  		te.restoreLogs = nil
   548  	}
   549  
   550  	if te.srv != nil {
   551  		te.srv.Stop()
   552  	}
   553  	for _, s := range te.srvs {
   554  		s.Stop()
   555  	}
   556  }
   557  
   558  // newTest returns a new test using the provided testing.T and
   559  // environment.  It is returned with default values. Tests should
   560  // modify it before calling its startServer and clientConn methods.
   561  func newTest(t *testing.T, e env) *test {
   562  	te := &test{
   563  		t:         t,
   564  		e:         e,
   565  		maxStream: math.MaxUint32,
   566  	}
   567  	te.ctx, te.cancel = context.WithCancel(context.Background())
   568  	return te
   569  }
   570  
// listenAndServe builds the server options from the test's knobs, creates a
// listener with the supplied listen function, registers the TestService and
// (optionally) a health server, records srv/hSrv/srvAddr on the test, and
// starts serving in a background goroutine. For httpHandler environments it
// instead wraps the gRPC server in an http.Server speaking HTTP/2 over TLS.
// It returns the listener so callers that need connection control can keep
// it. Fatal errors are reported via te.t.
func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
	te.t.Helper()
	te.t.Logf("Running test in %s environment...", te.e.name)
	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
	if te.maxServerMsgSize != nil {
		sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize))
	}
	if te.maxServerReceiveMsgSize != nil {
		sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize))
	}
	if te.maxServerSendMsgSize != nil {
		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
	}
	if te.maxServerHeaderListSize != nil {
		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
	}
	if te.tapHandle != nil {
		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
	}
	if te.serverCompression {
		sopts = append(sopts,
			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.unaryServerInt != nil {
		sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt))
	}
	if te.streamServerInt != nil {
		sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt))
	}
	if te.unknownHandler != nil {
		sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler))
	}
	if te.serverInitialWindowSize > 0 {
		sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize))
	}
	if te.serverInitialConnWindowSize > 0 {
		sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize))
	}
	la := "localhost:0"
	switch te.e.network {
	case "unix":
		// Use a unique socket path and remove any stale socket file.
		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
		_ = syscall.Unlink(la)
	}
	lis, err := listen(te.e.network, la)
	if err != nil {
		te.t.Fatalf("Failed to listen: %v", err)
	}
	if te.e.security == "tls" {
		creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
		if err != nil {
			te.t.Fatalf("Failed to generate credentials %v", err)
		}
		sopts = append(sopts, grpc.Creds(creds))
	}
	sopts = append(sopts, te.customServerOptions...)
	s := grpc.NewServer(sopts...)
	if ts != nil {
		testpb.RegisterTestServiceServer(s, ts)
	}

	// Create a new default health server if enableHealthServer is set, or use
	// the provided one.
	hs := te.healthServer
	if te.enableHealthServer {
		hs = health.NewServer()
	}
	if hs != nil {
		healthgrpc.RegisterHealthServer(s, hs)
	}

	// Derive the dialable address: unix sockets keep the path; TCP keeps
	// only the chosen port, pinned to localhost.
	addr := la
	switch te.e.network {
	case "unix":
	default:
		_, port, err := net.SplitHostPort(lis.Addr().String())
		if err != nil {
			te.t.Fatalf("Failed to parse listener address: %v", err)
		}
		addr = "localhost:" + port
	}

	te.srv = s
	te.hSrv = hs
	te.srvAddr = addr

	if te.e.httpHandler {
		if te.e.security != "tls" {
			te.t.Fatalf("unsupported environment settings")
		}
		cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
		if err != nil {
			te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err)
		}
		// Serve gRPC through the http.Handler transport: the *grpc.Server
		// acts as the http.Server's handler over HTTP/2 + TLS.
		hs := &http.Server{
			Handler:   s,
			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		}
		if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil {
			te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err)
		}
		te.srv = wrapHS{hs}
		tlsListener := tls.NewListener(lis, hs.TLSConfig)
		go func() {
			_ = hs.Serve(tlsListener)
		}()
		return lis
	}

	go func() {
		_ = s.Serve(lis)
	}()
	return lis
}
   687  
// wrapHS adapts an *http.Server to the stopper interface so the
// http.Handler-based server transport can be torn down like a *grpc.Server.
type wrapHS struct {
	s *http.Server
}

// GracefulStop drains in-flight requests before shutting the server down.
func (w wrapHS) GracefulStop() {
	_ = w.s.Shutdown(context.Background())
}

// Stop closes the server immediately without waiting for active requests.
func (w wrapHS) Stop() {
	_ = w.s.Close()
}
   699  
// startServerWithConnControl starts the server on a wrapped listener that
// exposes per-connection control hooks, and returns that wrapper.
func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper {
	l := te.listenAndServe(ts, listenWithConnControl)
	return l.(*listenerWrapper)
}

// startServer starts a gRPC server exposing the provided TestService
// implementation. Callers should defer a call to te.tearDown to clean up
func (te *test) startServer(ts testpb.TestServiceServer) {
	te.t.Helper()
	te.listenAndServe(ts, net.Listen)
}
   711  
// startServers starts 'num' gRPC servers exposing the provided TestService.
// Each server started by startServer is moved from the singular srv/hSrv/
// srvAddr fields into the plural slices, leaving the singular fields clear
// for the next iteration.
func (te *test) startServers(ts testpb.TestServiceServer, num int) {
	for i := 0; i < num; i++ {
		te.startServer(ts)
		te.srvs = append(te.srvs, te.srv.(*grpc.Server))
		te.hSrvs = append(te.hSrvs, te.hSrv)
		te.srvAddrs = append(te.srvAddrs, te.srvAddr)
		te.srv = nil
		te.hSrv = nil
		te.srvAddr = ""
	}
}
   724  
// setHealthServingStatus is a helper function to set the health status.
// It panics if the test's health server is not the default *health.Server
// (i.e. a custom healthServer was installed instead).
func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
	hs, ok := te.hSrv.(*health.Server)
	if !ok {
		panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs))
	}
	hs.SetServingStatus(service, status)
}
   733  
// nopCompressor is a gzip compressor that lies about its type name, used to
// exercise the server's handling of an unsupported compression type.
type nopCompressor struct {
	grpc.Compressor
}

// NewNopCompressor creates a compressor to test the case that type is not supported.
func NewNopCompressor() grpc.Compressor {
	return &nopCompressor{grpc.NewGZIPCompressor()}
}

// Type reports "nop", a compression type no server supports.
func (c *nopCompressor) Type() string {
	return "nop"
}
   746  
// nopDecompressor is a gzip decompressor that lies about its type name,
// mirroring nopCompressor on the receive side.
type nopDecompressor struct {
	grpc.Decompressor
}

// NewNopDecompressor creates a decompressor to test the case that type is not supported.
func NewNopDecompressor() grpc.Decompressor {
	return &nopDecompressor{grpc.NewGZIPDecompressor()}
}

// Type reports "nop", a compression type no peer supports.
func (d *nopDecompressor) Type() string {
	return "nop"
}
   759  
// configDial translates the test's client-side knobs into grpc.DialOptions
// and returns them together with the resolver scheme prefix ("passthrough:///"
// unless resolverScheme overrides it). It also defaults srvAddr to a
// client-side-only placeholder when no server was started. Fatal credential
// errors are reported via te.t.
func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
	// grpc.WithDialer is deprecated, use WithContextDialer instead.
	//opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent))
	opts = append(opts, grpc.WithContextDialer(
		func(ctx context.Context, addr string) (net.Conn, error) {
			// Convert the context deadline (if any) into the timeout shape
			// that env.dialer expects.
			if deadline, ok := ctx.Deadline(); ok {
				return te.e.dialer(addr, time.Until(deadline))
			}
			return te.e.dialer(addr, 0)
		}))

	if te.sc != nil {
		//goland:noinspection GoDeprecation
		opts = append(opts, grpc.WithServiceConfig(te.sc))
	}

	if te.clientCompression {
		opts = append(opts,
			grpc.WithCompressor(grpc.NewGZIPCompressor()),
			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.clientUseCompression {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
	}
	if te.clientNopCompression {
		opts = append(opts,
			grpc.WithCompressor(NewNopCompressor()),
			grpc.WithDecompressor(NewNopDecompressor()),
		)
	}
	if te.unaryClientInt != nil {
		opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt))
	}
	if te.streamClientInt != nil {
		opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt))
	}
	if te.maxClientMsgSize != nil {
		// grpc.WithMaxMsgSize is deprecated, use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
		//opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize))
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientMsgSize)))
	}
	if te.maxClientReceiveMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize)))
	}
	if te.maxClientSendMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
	}
	if te.maxClientHeaderListSize != nil {
		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
	}
	switch te.e.security {
	case "tls":
		creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com")
		if err != nil {
			te.t.Fatalf("Failed to load credentials: %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	case "empty":
		// Don't add any transport creds option.
	default:
		// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
		//opts = append(opts, grpc.WithInsecure())
		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
	}
	// TODO(bar) switch balancer case "pick_first".
	var scheme string
	if te.resolverScheme == "" {
		scheme = "passthrough:///"
	} else {
		scheme = te.resolverScheme + ":///"
	}
	if te.e.balancer != "" {
		opts = append(opts, grpc.WithBalancerName(te.e.balancer))
	}
	if te.clientInitialWindowSize > 0 {
		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
	}
	if te.clientInitialConnWindowSize > 0 {
		opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize))
	}
	if te.perRPCCreds != nil {
		opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds))
	}
	if te.customCodec != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec)))
	}
	// No server was started: use a placeholder address so Dial still works.
	if te.srvAddr == "" {
		te.srvAddr = "client.side.only.test"
	}
	opts = append(opts, te.customDialOptions...)
	return opts, scheme
}
   853  
// clientConnWithConnControl returns the test's client connection plus a
// dialerWrapper installed as the context dialer (presumably so the caller can
// observe/control the raw connection — dialerWrapper is defined elsewhere).
// If a connection was already established, it is returned with a nil wrapper.
func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) {
	if te.cc != nil {
		return te.cc, nil
	}
	opts, scheme := te.configDial()
	dw := &dialerWrapper{}
	// overwrite the dialer before
	// grpc.WithDialer is deprecated, use WithContextDialer instead.
	//opts = append(opts, grpc.WithDialer(dw.dialer))
	opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		return dw.dialer(addr, 0)
	}))
	var err error
	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
	if err != nil {
		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
	}
	return te.cc, dw
}
   873  
   874  func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn {
   875  	if te.cc != nil {
   876  		return te.cc
   877  	}
   878  	var scheme string
   879  	opts, scheme = te.configDial(opts...)
   880  	var err error
   881  	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
   882  	if err != nil {
   883  		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
   884  	}
   885  	return te.cc
   886  }
   887  
// declareLogNoise registers log phrases to treat as expected noise for this
// test; the returned restore function is saved on te so it can be undone later.
func (te *test) declareLogNoise(phrases ...string) {
	te.restoreLogs = declareLogNoise(te.t, phrases...)
}
   891  
// withServerTester dials the test server over a raw connection, wraps it in
// TLS when the environment requires it, performs the HTTP/2 greeting, and
// invokes fn with the resulting serverTester for low-level frame-level tests.
func (te *test) withServerTester(fn func(st *serverTester)) {
	c, err := te.e.dialer(te.srvAddr, 10*time.Second)
	if err != nil {
		te.t.Fatal(err)
	}
	// Note: the deferred close captures the raw conn here, so it closes the
	// underlying socket even after c is rebound to the TLS wrapper below.
	defer func(c net.Conn) {
		_ = c.Close()
	}(c)
	if te.e.security == "tls" {
		c = tls.Client(c, &tls.Config{
			InsecureSkipVerify: true,
			NextProtos:         []string{http2.NextProtoTLS},
		})
	}
	st := newServerTesterFromConn(te.t, c)
	st.greet()
	fn(st)
}
   910  
// lazyConn wraps a net.Conn and, once beLazy is set to 1, delays every Write
// by one second. Used to simulate a connection that suddenly becomes slow.
type lazyConn struct {
	net.Conn
	beLazy int32 // accessed atomically; 1 means delay writes
}
   915  
   916  func (l *lazyConn) Write(b []byte) (int, error) {
   917  	if atomic.LoadInt32(&(l.beLazy)) == 1 {
   918  		time.Sleep(time.Second)
   919  	}
   920  	return l.Conn.Write(b)
   921  }
   922  
// TestContextDeadlineNotIgnored verifies that a short RPC deadline is honored
// even when the underlying connection's writes stall (simulated via lazyConn).
func (s) TestContextDeadlineNotIgnored(t *testing.T) {
	e := noBalancerEnv
	var lc *lazyConn
	// Install a dialer that captures the connection in a lazyConn so the test
	// can make writes slow after the connection is established.
	e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) {
		conn, err := net.DialTimeout(network, addr, timeout)
		if err != nil {
			return nil, err
		}
		lc = &lazyConn{Conn: conn}
		return lc, nil
	}

	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// First RPC succeeds normally, proving the connection works.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	cancel()
	// Now make writes take 1s each; the 50ms deadline below must still fire.
	atomic.StoreInt32(&(lc.beLazy), 1)
	ctx, cancel = context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	t1 := time.Now()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err)
	}
	// The RPC must fail close to its deadline, not after the slow writes drain.
	if time.Since(t1) > 2*time.Second {
		t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline")
	}
}
   957  
   958  func (s) TestTimeoutOnDeadServer(t *testing.T) {
   959  	for _, e := range listTestEnv() {
   960  		testTimeoutOnDeadServer(t, e)
   961  	}
   962  }
   963  
// testTimeoutOnDeadServer verifies that once the server is stopped, the client
// notices the broken connection and a wait-for-ready RPC with a tiny deadline
// fails with DeadlineExceeded instead of hanging.
func testTimeoutOnDeadServer(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Silence expected transport-teardown log output.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Sanity-check the connection with one successful RPC before killing it.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	te.srv.Stop()
	cancel()

	// Wait for the client to notice the connection is gone.
	ctx, cancel = context.WithTimeout(context.Background(), 500*time.Millisecond)
	state := cc.GetState()
	for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
	}
	cancel()
	if state == connectivity.Ready {
		t.Fatalf("Timed out waiting for non-ready state")
	}
	// A wait-for-ready RPC with a 1ms deadline should time out promptly.
	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond)
	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
	cancel()
	if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded {
		// If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error,
		// the error will be an internal error.
		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
	}
	awaitNewConnLogOutput()
}
  1003  
  1004  func (s) TestServerGracefulStopIdempotent(t *testing.T) {
  1005  	for _, e := range listTestEnv() {
  1006  		if e.name == "handler-tls" {
  1007  			continue
  1008  		}
  1009  		testServerGracefulStopIdempotent(t, e)
  1010  	}
  1011  }
  1012  
  1013  func testServerGracefulStopIdempotent(t *testing.T, e env) {
  1014  	te := newTest(t, e)
  1015  	te.userAgent = testAppUA
  1016  	te.startServer(&testServer{security: e.security})
  1017  	defer te.tearDown()
  1018  
  1019  	for i := 0; i < 3; i++ {
  1020  		te.srv.GracefulStop()
  1021  	}
  1022  }
  1023  
  1024  func (s) TestServerGoAway(t *testing.T) {
  1025  	for _, e := range listTestEnv() {
  1026  		if e.name == "handler-tls" {
  1027  			continue
  1028  		}
  1029  		testServerGoAway(t, e)
  1030  	}
  1031  }
  1032  
// testServerGoAway verifies that after GracefulStop sends a GoAway, new RPCs
// on the drained connection fail while the graceful shutdown completes.
func testServerGoAway(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Finish an RPC to make sure the connection is good.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	ch := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch)
	}()
	// Loop until the server side GoAway signal is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded {
			cancel()
			break
		}
		cancel()
	}
	// A new RPC should fail.
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal)
	}
	// Wait for the GracefulStop goroutine to finish.
	<-ch
	awaitNewConnLogOutput()
}
  1070  
  1071  func (s) TestServerGoAwayPendingRPC(t *testing.T) {
  1072  	for _, e := range listTestEnv() {
  1073  		if e.name == "handler-tls" {
  1074  			continue
  1075  		}
  1076  		testServerGoAwayPendingRPC(t, e)
  1077  	}
  1078  }
  1079  
// testServerGoAwayPendingRPC verifies that a stream opened before GracefulStop
// remains usable for sends and receives after the client has seen the GoAway,
// while new RPCs on the connection start failing.
func testServerGoAwayPendingRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Silence expected transport-teardown log output.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Open a long-lived stream; it must survive the graceful shutdown below.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// Finish an RPC to make sure the connection is good.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
	}
	ch := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch)
	}()
	// Loop until the server side GoAway signal is propagated to the client.
	start := time.Now()
	errored := false
	for time.Since(start) < time.Second {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
		cancel()
		if err != nil {
			errored = true
			break
		}
	}
	if !errored {
		t.Fatalf("GoAway never received by client")
	}
	respParam := []*testpb.ResponseParameters{{Size: 1}}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// The existing RPC should be still good to proceed.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
	}
	// The RPC will run until canceled.
	cancel()
	// GracefulStop can only finish once the pending stream is gone.
	<-ch
	awaitNewConnLogOutput()
}
  1144  
  1145  func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) {
  1146  	for _, e := range listTestEnv() {
  1147  		if e.name == "handler-tls" {
  1148  			continue
  1149  		}
  1150  		testServerMultipleGoAwayPendingRPC(t, e)
  1151  	}
  1152  }
  1153  
// testServerMultipleGoAwayPendingRPC verifies that two concurrent
// GracefulStop calls both block while a stream is still active, and both
// return once the stream is closed by the client.
func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Silence expected transport-teardown log output.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Open a stream that keeps the server's graceful shutdown pending.
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// Finish an RPC to make sure the connection is good.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
	}
	// Start two concurrent GracefulStop calls; each signals completion on its
	// own channel.
	ch1 := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch1)
	}()
	ch2 := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch2)
	}()
	// Loop until the server side GoAway signal is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
			cancel()
			break
		}
		cancel()
	}
	// With the stream still open, neither GracefulStop may have returned yet.
	select {
	case <-ch1:
		t.Fatal("GracefulStop() terminated early")
	case <-ch2:
		t.Fatal("GracefulStop() terminated early")
	default:
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// The existing RPC should be still good to proceed.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
	}
	// Closing the stream lets both GracefulStop calls complete.
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
	}
	<-ch1
	<-ch2
	cancel()
	awaitNewConnLogOutput()
}
  1231  
  1232  func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) {
  1233  	for _, e := range listTestEnv() {
  1234  		if e.name == "handler-tls" {
  1235  			continue
  1236  		}
  1237  		testConcurrentClientConnCloseAndServerGoAway(t, e)
  1238  	}
  1239  }
  1240  
// testConcurrentClientConnCloseAndServerGoAway closes the ClientConn and
// gracefully stops the server at the same time, checking that GracefulStop
// still returns (no deadlock between the two shutdown paths).
func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Silence expected transport-teardown log output.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Prove the connection works before racing the shutdowns.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
	}
	ch := make(chan struct{})
	// Close ClientConn and Server concurrently.
	go func() {
		te.srv.GracefulStop()
		close(ch)
	}()
	go func() {
		_ = cc.Close()
	}()
	// The test passes once GracefulStop completes.
	<-ch
}
  1270  
  1271  func (s) TestConcurrentServerStopAndGoAway(t *testing.T) {
  1272  	for _, e := range listTestEnv() {
  1273  		if e.name == "handler-tls" {
  1274  			continue
  1275  		}
  1276  		testConcurrentServerStopAndGoAway(t, e)
  1277  	}
  1278  }
  1279  
// testConcurrentServerStopAndGoAway starts a graceful shutdown and then hard
// Stop()s the server while a stream is pending, verifying that the stream's
// Send eventually reports io.EOF and Recv reports a non-EOF error.
func testConcurrentServerStopAndGoAway(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Silence expected transport-teardown log output.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// Open a stream that will be interrupted by the hard Stop below.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}

	// Finish an RPC to make sure the connection is good.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
	}

	ch := make(chan struct{})
	go func() {
		te.srv.GracefulStop()
		close(ch)
	}()
	// Loop until the server side GoAway signal is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
			cancel()
			break
		}
		cancel()
	}
	// Stop the server and close all the connections.
	te.srv.Stop()
	respParam := []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// Keep sending until the client-side stream learns the connection died.
	sendStart := time.Now()
	for {
		if err := stream.Send(req); err == io.EOF {
			// stream.Send should eventually send io.EOF
			break
		} else if err != nil {
			// Send should never return a transport-level error.
			t.Fatalf("stream.Send(%v) = %v; want <nil or io.EOF>", req, err)
		}
		if time.Since(sendStart) > 2*time.Second {
			t.Fatalf("stream.Send(_) did not return io.EOF after 2s")
		}
		time.Sleep(time.Millisecond)
	}
	if _, err := stream.Recv(); err == nil || err == io.EOF {
		t.Fatalf("%v.Recv() = _, %v, want _, <non-nil, non-EOF>", stream, err)
	}
	<-ch
	awaitNewConnLogOutput()
}
  1355  
// TestDetailedConnectionCloseErrorPropagatesToRpcError verifies that when the
// server is abruptly stopped mid-RPC, the client's RPC error includes the
// specific transport-level cause (connection reset or EOF), not a generic one.
func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) {
	rpcStartedOnServer := make(chan struct{})
	rpcDoneOnClient := make(chan struct{})
	// Server handler blocks until the client is done, keeping the RPC in
	// flight while the test kills the server.
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
			close(rpcStartedOnServer)
			<-rpcDoneOnClient
			return status.Error(codes.Internal, "arbitrary status")
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The precise behavior of this test is subject to raceyness around the timing of when TCP packets
	// are sent from client to server, and when we tell the server to stop, so we need to account for both
	// of these possible error messages:
	// 1) If the call to ss.S.Stop() causes the server's sockets to close while there's still in-fight
	//    data from the client on the TCP connection, then the kernel can send an RST back to the client (also
	//    see https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). Note that while this
	//    condition is expected to be rare due to the rpcStartedOnServer synchronization, in theory it should
	//    be possible, e.g. if the client sends a BDP ping at the right time.
	// 2) If, for example, the call to ss.S.Stop() happens after the RPC headers have been received at the
	//    server, then the TCP connection can shutdown gracefully when the server's socket closes.
	const possibleConnResetMsg = "connection reset by peer"
	const possibleEOFMsg = "error reading from server: EOF"
	// Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly
	// stop the server, killing the connection. The RPC error message should include details about the specific
	// connection error that was encountered.
	stream, err := ss.Client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
	}
	// Block until the RPC has been started on the server. This ensures that the ClientConn will find a healthy
	// connection for the RPC to go out on initially, and that the TCP connection will shut down strictly after
	// the RPC has been started on it.
	<-rpcStartedOnServer
	ss.S.Stop()
	if _, err := stream.Recv(); err == nil || (!strings.Contains(err.Error(), possibleConnResetMsg) && !strings.Contains(err.Error(), possibleEOFMsg)) {
		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg)
	}
	close(rpcDoneOnClient)
}
  1402  
// TestDetailedGoawayErrorOnGracefulClosePropagatesToRPCError verifies that
// when the server gracefully drains a connection (here forced quickly via a
// tiny MaxConnectionAge), the RPC error surfaced to the client mentions the
// received GoAway with code NO_ERROR.
func (s) TestDetailedGoawayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) {
	rpcDoneOnClient := make(chan struct{})
	// Handler blocks so the stream is still active when the connection ages out.
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
			<-rpcDoneOnClient
			return status.Error(codes.Internal, "arbitrary status")
		},
	}
	// Age the connection out almost immediately to trigger a graceful GoAway.
	sopts := []grpc.ServerOption{
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionAge:      time.Millisecond * 100,
			MaxConnectionAgeGrace: time.Millisecond,
		}),
	}
	if err := ss.Start(sopts); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := ss.Client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
	}
	const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR"
	_, err = stream.Recv()
	close(rpcDoneOnClient)
	if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) {
		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring)
	}
}
  1435  
// TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError provokes the
// server's keepalive enforcement into sending an ENHANCE_YOUR_CALM
// ("too_many_pings") GoAway, and verifies that detail reaches the RPC error.
func (s) TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) {
	// set the min keepalive time very low so that this test can take
	// a reasonable amount of time
	prev := internal.KeepaliveMinPingTime
	internal.KeepaliveMinPingTime = time.Millisecond
	defer func() { internal.KeepaliveMinPingTime = prev }()

	rpcDoneOnClient := make(chan struct{})
	// Handler blocks so the stream stays open while the pings accumulate.
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
			<-rpcDoneOnClient
			return status.Error(codes.Internal, "arbitrary status")
		},
	}
	// Server permits at most one ping per 1000s...
	sopts := []grpc.ServerOption{
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime: time.Second * 1000, /* arbitrary, large value */
		}),
	}
	// ...while the client pings every millisecond, guaranteeing a violation.
	dopts := []grpc.DialOption{
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                time.Millisecond,   /* should trigger "too many pings" error quickly */
			Timeout:             time.Second * 1000, /* arbitrary, large value */
			PermitWithoutStream: false,
		}),
	}
	if err := ss.Start(sopts, dopts...); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := ss.Client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
	}
	const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"`
	_, err = stream.Recv()
	close(rpcDoneOnClient)
	if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) {
		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring)
	}
}
  1480  
  1481  func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) {
  1482  	for _, e := range listTestEnv() {
  1483  		if e.name == "handler-tls" {
  1484  			continue
  1485  		}
  1486  		testClientConnCloseAfterGoAwayWithActiveStream(t, e)
  1487  	}
  1488  }
  1489  
  1490  func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) {
  1491  	te := newTest(t, e)
  1492  	te.startServer(&testServer{security: e.security})
  1493  	defer te.tearDown()
  1494  	cc := te.clientConn()
  1495  	tc := testpb.NewTestServiceClient(cc)
  1496  
  1497  	ctx, cancel := context.WithCancel(context.Background())
  1498  	defer cancel()
  1499  	if _, err := tc.FullDuplexCall(ctx); err != nil {
  1500  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
  1501  	}
  1502  	done := make(chan struct{})
  1503  	go func() {
  1504  		te.srv.GracefulStop()
  1505  		close(done)
  1506  	}()
  1507  	time.Sleep(50 * time.Millisecond)
  1508  	_ = cc.Close()
  1509  	timeout := time.NewTimer(time.Second)
  1510  	select {
  1511  	case <-done:
  1512  	case <-timeout.C:
  1513  		t.Fatalf("Test timed-out.")
  1514  	}
  1515  }
  1516  
  1517  func (s) TestFailFast(t *testing.T) {
  1518  	for _, e := range listTestEnv() {
  1519  		testFailFast(t, e)
  1520  	}
  1521  }
  1522  
// testFailFast verifies that once the server is torn down, fail-fast RPCs
// (both unary and streaming) fail immediately with codes.Unavailable while
// the client keeps trying to reconnect.
func testFailFast(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Silence expected transport-teardown log output.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Stop the server and tear down all the existing connections.
	te.srv.Stop()
	// Loop until the server teardown is propagated to the client.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err := tc.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if status.Code(err) == codes.Unavailable {
			break
		}
		t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err)
		time.Sleep(10 * time.Millisecond)
	}
	// NOTE(review): the calls below reuse the outer 10s ctx created before the
	// loop above; if the loop runs long the ctx could expire first and yield
	// DeadlineExceeded instead of Unavailable — potential flake, TODO confirm.
	// The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}
	if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}

	awaitNewConnLogOutput()
}
  1564  
// testServiceConfigSetup builds a test fixture pre-configured with the common
// user agent and log-noise suppressions used by the service-config tests.
// The caller is responsible for te.tearDown().
func testServiceConfigSetup(t *testing.T, e env) *test {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	return te
}
  1576  
  1577  func newBool(b bool) (a *bool) {
  1578  	return &b
  1579  }
  1580  
  1581  func newInt(b int) (a *int) {
  1582  	return &b
  1583  }
  1584  
  1585  func newDuration(b time.Duration) (a *time.Duration) {
  1586  	a = new(time.Duration)
  1587  	*a = b
  1588  	return
  1589  }
  1590  
// TestGetMethodConfig verifies that ClientConn.GetMethodConfig reflects the
// service config most recently pushed through the resolver, and that a
// method-specific entry takes precedence over the service-level default.
// No server is started, so RPC outcomes are driven purely by the config:
// waitForReady=true with a 1ms timeout yields DeadlineExceeded, while a
// fail-fast RPC against an unreachable backend yields Unavailable.
func (s) TestGetMethodConfig(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	// First config: EmptyCall is waitForReady=true with a 1ms timeout; the
	// service-level default for every other method is waitForReady=false.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	tc := testpb.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	// NOTE(review): this wait loop has no upper bound; it relies on the
	// suite-level test timeout if the config is never applied.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// Second config: the method-specific entry now names UnaryCall instead,
	// so EmptyCall falls back to the service-level default (fail-fast).
	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "UnaryCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	// Make sure service config has been processed by grpc.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become fail-fast.
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
  1677  
// TestServiceConfigWaitForReady verifies the precedence between the
// wait-for-ready setting in the service config and the one supplied through
// the client API (grpc.WaitForReady call option). No server is started, so a
// wait-for-ready RPC blocks until its 1ms service-config timeout fires,
// producing DeadlineExceeded in both cases below.
func (s) TestServiceConfigWaitForReady(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": false,
            "timeout": ".001s"
        }
    ]
}`)})

	tc := testpb.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	// NOTE(review): unbounded wait loop; relies on the suite-level timeout.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}

	// Generate a service config update.
	// Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
  1766  
// TestServiceConfigTimeout verifies that the effective RPC deadline is the
// minimum of the context deadline set through the client API and the timeout
// from the service config. No server is started, so each wait-for-ready RPC
// blocks until whichever deadline is shorter fires (DeadlineExceeded).
func (s) TestServiceConfigTimeout(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": "3600s"
        }
    ]
}`)})

	tc := testpb.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	// NOTE(review): unbounded wait loop; relies on the suite-level timeout.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
	var err error
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()

	// Generate a service config update.
	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".000000001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()
}
  1864  
// TestServiceConfigMaxMsgSize exercises the interaction between message-size
// limits set through the service config and through the client API, for both
// unary and full-duplex RPCs, in three scenarios:
//
//	Case1: only the service config sets limits (2048 send / 2048 recv).
//	Case2: client API limits (1024) are tighter than the service config's;
//	       the tighter 1024 limits apply.
//	Case3: client API limits (4096) are looser than the service config's;
//	       the service config's 2048 limits still apply.
//
// Throughout, req/sreq and respParam are shared and mutated between
// sub-checks, so the order of statements is significant.
func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
	e := tcpClearRREnv
	r := manual.NewBuilderWithScheme("whatever")

	// Setting up values and objects shared across all test cases.
	const smallSize = 1
	const largeSize = 1024
	const extraLargeSize = 2048

	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
	if err != nil {
		t.Fatal(err)
	}

	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te1 := testServiceConfigSetup(t, e)
	defer te1.tearDown()

	te1.resolverScheme = r.Scheme()
	te1.startServer(&testServer{security: e.security})
	cc1 := te1.clientConn(grpc.WithResolvers(r))

	addrs := []resolver.Address{{Addr: te1.srvAddr}}
	// sc is reused (unchanged) for all three cases below.
	sc := parseCfg(r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "UnaryCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "maxRequestMessageBytes": 2048,
            "maxResponseMessageBytes": 2048
        }
    ]
}`)
	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
	tc := testpb.NewTestServiceClient(cc1)

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(extraLargeSize),
		Payload:      smallPayload,
	}

	// Make sure the service config has been processed by grpc.
	for {
		if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC recv.
	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = extraLargePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(extraLargeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}
	stream, err := tc.FullDuplexCall(te1.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = extraLargePayload
	stream, err = tc.FullDuplexCall(te1.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}

	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te2 := testServiceConfigSetup(t, e)
	te2.resolverScheme = r.Scheme()
	te2.maxClientReceiveMsgSize = newInt(1024)
	te2.maxClientSendMsgSize = newInt(1024)

	te2.startServer(&testServer{security: e.security})
	defer te2.tearDown()
	cc2 := te2.clientConn(grpc.WithResolvers(r))
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc})
	tc = testpb.NewTestServiceClient(cc2)

	// Wait for the service config to reach the new channel.
	for {
		if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// Test for unary RPC recv.
	req.Payload = smallPayload
	req.ResponseSize = int32(largeSize)

	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	stream, err = tc.FullDuplexCall(te2.ctx)
	respParam[0].Size = int32(largeSize)
	sreq.Payload = smallPayload
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te2.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}

	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te3 := testServiceConfigSetup(t, e)
	te3.resolverScheme = r.Scheme()
	te3.maxClientReceiveMsgSize = newInt(4096)
	te3.maxClientSendMsgSize = newInt(4096)

	te3.startServer(&testServer{security: e.security})
	defer te3.tearDown()

	cc3 := te3.clientConn(grpc.WithResolvers(r))
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc})
	tc = testpb.NewTestServiceClient(cc3)

	// Wait for the service config to reach the new channel.
	for {
		if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// Test for unary RPC recv: 1024 is within the 2048 limit...
	req.Payload = smallPayload
	req.ResponseSize = int32(largeSize)

	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
	}

	// ...but 2048 of payload exceeds it once framing overhead is added.
	req.ResponseSize = int32(extraLargeSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
	}

	req.Payload = extraLargePayload
	if _, err = tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	stream, err = tc.FullDuplexCall(te3.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam[0].Size = int32(largeSize)
	sreq.Payload = smallPayload

	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
	}

	respParam[0].Size = int32(extraLargeSize)

	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te3.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	sreq.Payload = extraLargePayload
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}
  2119  
// Reading from a streaming RPC may fail with context canceled if timeout was
// set by service config (https://github.com/grpc/grpc-go/issues/1818). This
// test makes sure read from streaming RPC doesn't fail in this case.
func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	te.startServer(&testServer{security: tcpClearRREnv.security})
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	tc := testpb.NewTestServiceClient(cc)

	// FullDuplexCall gets a 10s timeout from the service config; the client
	// context below is cancelable but has no deadline of its own.
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: te.srvAddr}},
		ServiceConfig: parseCfg(r, `{
	    "methodConfig": [
	        {
	            "name": [
	                {
	                    "service": "grpc.testing.TestService",
	                    "method": "FullDuplexCall"
	                }
	            ],
	            "waitForReady": true,
	            "timeout": "10s"
	        }
	    ]
	}`)})
	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
	}

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0)
	if err != nil {
		t.Fatalf("failed to newPayload: %v", err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{{Size: 0}},
		Payload:            payload,
	}
	if err := stream.Send(req); err != nil {
		t.Fatalf("stream.Send(%v) = %v, want <nil>", req, err)
	}
	_ = stream.CloseSend()
	time.Sleep(time.Second)
	// Sleep 1 second before recv to make sure the final status is received
	// before the recv. The Recv must still return the buffered message, not
	// a context error (the regression this test guards against).
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("stream.Recv = _, %v, want _, <nil>", err)
	}
	// Keep reading to drain the stream.
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
}
  2190  
  2191  func (s) TestPreloaderClientSend(t *testing.T) {
  2192  	for _, e := range listTestEnv() {
  2193  		testPreloaderClientSend(t, e)
  2194  	}
  2195  }
  2196  
  2197  func testPreloaderClientSend(t *testing.T, e env) {
  2198  	te := newTest(t, e)
  2199  	te.userAgent = testAppUA
  2200  	te.declareLogNoise(
  2201  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2202  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2203  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2204  		"Failed to dial : context canceled; please retry.",
  2205  	)
  2206  	te.startServer(&testServer{security: e.security})
  2207  
  2208  	defer te.tearDown()
  2209  	tc := testpb.NewTestServiceClient(te.clientConn())
  2210  
  2211  	// Test for streaming RPC recv.
  2212  	// Set context for send with proper RPC Information
  2213  	stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip"))
  2214  	if err != nil {
  2215  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2216  	}
  2217  	var index int
  2218  	for index < len(reqSizes) {
  2219  		respParam := []*testpb.ResponseParameters{
  2220  			{
  2221  				Size: int32(respSizes[index]),
  2222  			},
  2223  		}
  2224  
  2225  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  2226  		if err != nil {
  2227  			t.Fatal(err)
  2228  		}
  2229  
  2230  		req := &testpb.StreamingOutputCallRequest{
  2231  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2232  			ResponseParameters: respParam,
  2233  			Payload:            payload,
  2234  		}
  2235  		preparedMsg := &grpc.PreparedMsg{}
  2236  		err = preparedMsg.Encode(stream, req)
  2237  		if err != nil {
  2238  			t.Fatalf("PrepareMsg failed for size %d : %v", reqSizes[index], err)
  2239  		}
  2240  		if err := stream.SendMsg(preparedMsg); err != nil {
  2241  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  2242  		}
  2243  		reply, err := stream.Recv()
  2244  		if err != nil {
  2245  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  2246  		}
  2247  		pt := reply.GetPayload().GetType()
  2248  		if pt != testpb.PayloadType_COMPRESSABLE {
  2249  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  2250  		}
  2251  		size := len(reply.GetPayload().GetBody())
  2252  		if size != respSizes[index] {
  2253  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  2254  		}
  2255  		index++
  2256  	}
  2257  	if err := stream.CloseSend(); err != nil {
  2258  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2259  	}
  2260  	if _, err := stream.Recv(); err != io.EOF {
  2261  		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
  2262  	}
  2263  }
  2264  
  2265  func (s) TestPreloaderSenderSend(t *testing.T) {
  2266  	ss := &stubserver.StubServer{
  2267  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  2268  			for i := 0; i < 10; i++ {
  2269  				preparedMsg := &grpc.PreparedMsg{}
  2270  				err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{
  2271  					Payload: &testpb.Payload{
  2272  						Body: []byte{'0' + uint8(i)},
  2273  					},
  2274  				})
  2275  				if err != nil {
  2276  					return err
  2277  				}
  2278  				_ = stream.SendMsg(preparedMsg)
  2279  			}
  2280  			return nil
  2281  		},
  2282  	}
  2283  	if err := ss.Start(nil); err != nil {
  2284  		t.Fatalf("Error starting endpoint server: %v", err)
  2285  	}
  2286  	defer ss.Stop()
  2287  
  2288  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  2289  	defer cancel()
  2290  
  2291  	stream, err := ss.Client.FullDuplexCall(ctx)
  2292  	if err != nil {
  2293  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  2294  	}
  2295  
  2296  	var ngot int
  2297  	var buf bytes.Buffer
  2298  	for {
  2299  		reply, err := stream.Recv()
  2300  		if err == io.EOF {
  2301  			break
  2302  		}
  2303  		if err != nil {
  2304  			t.Fatal(err)
  2305  		}
  2306  		ngot++
  2307  		if buf.Len() > 0 {
  2308  			buf.WriteByte(',')
  2309  		}
  2310  		buf.Write(reply.GetPayload().GetBody())
  2311  	}
  2312  	if want := 10; ngot != want {
  2313  		t.Errorf("Got %d replies, want %d", ngot, want)
  2314  	}
  2315  	if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  2316  		t.Errorf("Got replies %q; want %q", got, want)
  2317  	}
  2318  }
  2319  
  2320  func (s) TestMaxMsgSizeClientDefault(t *testing.T) {
  2321  	for _, e := range listTestEnv() {
  2322  		testMaxMsgSizeClientDefault(t, e)
  2323  	}
  2324  }
  2325  
// testMaxMsgSizeClientDefault verifies that, with no explicit client-side
// limits configured, a 4MiB response is rejected with ResourceExhausted for
// both unary and full-duplex RPCs (largeSize is presumably sized to exceed
// the client's default receive limit once framing overhead is added — the
// assertions below depend only on the rejection itself).
func testMaxMsgSizeClientDefault(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	const smallSize = 1
	const largeSize = 4 * 1024 * 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC recv.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC recv.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  2382  
  2383  func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
  2384  	for _, e := range listTestEnv() {
  2385  		testMaxMsgSizeClientAPI(t, e)
  2386  	}
  2387  }
  2388  
  2389  func testMaxMsgSizeClientAPI(t *testing.T, e env) {
  2390  	te := newTest(t, e)
  2391  	te.userAgent = testAppUA
  2392  	// To avoid error on server side.
  2393  	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
  2394  	te.maxClientReceiveMsgSize = newInt(1024)
  2395  	te.maxClientSendMsgSize = newInt(1024)
  2396  	te.declareLogNoise(
  2397  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2398  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2399  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2400  		"Failed to dial : context canceled; please retry.",
  2401  	)
  2402  	te.startServer(&testServer{security: e.security})
  2403  
  2404  	defer te.tearDown()
  2405  	tc := testpb.NewTestServiceClient(te.clientConn())
  2406  
  2407  	const smallSize = 1
  2408  	const largeSize = 1024
  2409  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  2410  	if err != nil {
  2411  		t.Fatal(err)
  2412  	}
  2413  
  2414  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  2415  	if err != nil {
  2416  		t.Fatal(err)
  2417  	}
  2418  	req := &testpb.SimpleRequest{
  2419  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2420  		ResponseSize: int32(largeSize),
  2421  		Payload:      smallPayload,
  2422  	}
  2423  
  2424  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2425  	defer cancel()
  2426  	// Test for unary RPC recv.
  2427  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2428  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2429  	}
  2430  
  2431  	// Test for unary RPC send.
  2432  	req.Payload = largePayload
  2433  	req.ResponseSize = int32(smallSize)
  2434  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2435  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2436  	}
  2437  
  2438  	respParam := []*testpb.ResponseParameters{
  2439  		{
  2440  			Size: int32(largeSize),
  2441  		},
  2442  	}
  2443  	sreq := &testpb.StreamingOutputCallRequest{
  2444  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2445  		ResponseParameters: respParam,
  2446  		Payload:            smallPayload,
  2447  	}
  2448  
  2449  	// Test for streaming RPC recv.
  2450  	stream, err := tc.FullDuplexCall(te.ctx)
  2451  	if err != nil {
  2452  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2453  	}
  2454  	if err := stream.Send(sreq); err != nil {
  2455  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2456  	}
  2457  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2458  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2459  	}
  2460  
  2461  	// Test for streaming RPC send.
  2462  	respParam[0].Size = int32(smallSize)
  2463  	sreq.Payload = largePayload
  2464  	stream, err = tc.FullDuplexCall(te.ctx)
  2465  	if err != nil {
  2466  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2467  	}
  2468  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  2469  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  2470  	}
  2471  }
  2472  
  2473  func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
  2474  	for _, e := range listTestEnv() {
  2475  		testMaxMsgSizeServerAPI(t, e)
  2476  	}
  2477  }
  2478  
  2479  func testMaxMsgSizeServerAPI(t *testing.T, e env) {
  2480  	te := newTest(t, e)
  2481  	te.userAgent = testAppUA
  2482  	te.maxServerReceiveMsgSize = newInt(1024)
  2483  	te.maxServerSendMsgSize = newInt(1024)
  2484  	te.declareLogNoise(
  2485  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2486  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2487  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2488  		"Failed to dial : context canceled; please retry.",
  2489  	)
  2490  	te.startServer(&testServer{security: e.security})
  2491  
  2492  	defer te.tearDown()
  2493  	tc := testpb.NewTestServiceClient(te.clientConn())
  2494  
  2495  	const smallSize = 1
  2496  	const largeSize = 1024
  2497  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  2498  	if err != nil {
  2499  		t.Fatal(err)
  2500  	}
  2501  
  2502  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  2503  	if err != nil {
  2504  		t.Fatal(err)
  2505  	}
  2506  	req := &testpb.SimpleRequest{
  2507  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2508  		ResponseSize: int32(largeSize),
  2509  		Payload:      smallPayload,
  2510  	}
  2511  
  2512  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2513  	defer cancel()
  2514  	// Test for unary RPC send.
  2515  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2516  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2517  	}
  2518  
  2519  	// Test for unary RPC recv.
  2520  	req.Payload = largePayload
  2521  	req.ResponseSize = int32(smallSize)
  2522  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2523  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2524  	}
  2525  
  2526  	respParam := []*testpb.ResponseParameters{
  2527  		{
  2528  			Size: int32(largeSize),
  2529  		},
  2530  	}
  2531  	sreq := &testpb.StreamingOutputCallRequest{
  2532  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2533  		ResponseParameters: respParam,
  2534  		Payload:            smallPayload,
  2535  	}
  2536  
  2537  	// Test for streaming RPC send.
  2538  	stream, err := tc.FullDuplexCall(te.ctx)
  2539  	if err != nil {
  2540  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2541  	}
  2542  	if err := stream.Send(sreq); err != nil {
  2543  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2544  	}
  2545  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2546  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2547  	}
  2548  
  2549  	// Test for streaming RPC recv.
  2550  	respParam[0].Size = int32(smallSize)
  2551  	sreq.Payload = largePayload
  2552  	stream, err = tc.FullDuplexCall(te.ctx)
  2553  	if err != nil {
  2554  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2555  	}
  2556  	if err := stream.Send(sreq); err != nil {
  2557  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2558  	}
  2559  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2560  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2561  	}
  2562  }
  2563  
  2564  func (s) TestTap(t *testing.T) {
  2565  	for _, e := range listTestEnv() {
  2566  		if e.name == "handler-tls" {
  2567  			continue
  2568  		}
  2569  		testTap(t, e)
  2570  	}
  2571  }
  2572  
// myTap is a test server tap handle; see its handle method for the
// per-method behavior it implements.
type myTap struct {
	cnt int // number of EmptyCall RPCs observed by handle
}
  2576  
  2577  func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
  2578  	if info != nil {
  2579  		switch info.FullMethodName {
  2580  		case "/grpc.testing.TestService/EmptyCall":
  2581  			t.cnt++
  2582  		case "/grpc.testing.TestService/UnaryCall":
  2583  			return nil, fmt.Errorf("tap error")
  2584  		case "/grpc.testing.TestService/FullDuplexCall":
  2585  			return nil, status.Errorf(codes.FailedPrecondition, "test custom error")
  2586  		}
  2587  	}
  2588  	return ctx, nil
  2589  }
  2590  
  2591  func testTap(t *testing.T, e env) {
  2592  	te := newTest(t, e)
  2593  	te.userAgent = testAppUA
  2594  	ttap := &myTap{}
  2595  	te.tapHandle = ttap.handle
  2596  	te.declareLogNoise(
  2597  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2598  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2599  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2600  	)
  2601  	te.startServer(&testServer{security: e.security})
  2602  	defer te.tearDown()
  2603  
  2604  	cc := te.clientConn()
  2605  	tc := testpb.NewTestServiceClient(cc)
  2606  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2607  	defer cancel()
  2608  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  2609  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  2610  	}
  2611  	if ttap.cnt != 1 {
  2612  		t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt)
  2613  	}
  2614  
  2615  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
  2616  	if err != nil {
  2617  		t.Fatal(err)
  2618  	}
  2619  
  2620  	req := &testpb.SimpleRequest{
  2621  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2622  		ResponseSize: 45,
  2623  		Payload:      payload,
  2624  	}
  2625  	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.PermissionDenied {
  2626  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied)
  2627  	}
  2628  	str, err := tc.FullDuplexCall(ctx)
  2629  	if err != nil {
  2630  		t.Fatalf("Unexpected error creating stream: %v", err)
  2631  	}
  2632  	if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition {
  2633  		t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition)
  2634  	}
  2635  }
  2636  
  2637  // healthCheck is a helper function to make a unary health check RPC and return
  2638  // the response.
  2639  func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) {
  2640  	ctx, cancel := context.WithTimeout(context.Background(), d)
  2641  	defer cancel()
  2642  	hc := healthgrpc.NewHealthClient(cc)
  2643  	return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service})
  2644  }
  2645  
  2646  // verifyHealthCheckStatus is a helper function to verify that the current
  2647  // health status of the service matches the one passed in 'wantStatus'.
  2648  func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
  2649  	t.Helper()
  2650  	resp, err := healthCheck(d, cc, service)
  2651  	if err != nil {
  2652  		t.Fatalf("Health/Check(_, _) = _, %v, want _, <nil>", err)
  2653  	}
  2654  	if resp.Status != wantStatus {
  2655  		t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus)
  2656  	}
  2657  }
  2658  
  2659  // verifyHealthCheckErrCode is a helper function to verify that a unary health
  2660  // check RPC returns an error with a code set to 'wantCode'.
  2661  func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) {
  2662  	t.Helper()
  2663  	if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode {
  2664  		t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode)
  2665  	}
  2666  }
  2667  
  2668  // newHealthCheckStream is a helper function to start a health check streaming
  2669  // RPC, and returns the stream.
  2670  func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) {
  2671  	t.Helper()
  2672  	ctx, cancel := context.WithCancel(context.Background())
  2673  	hc := healthgrpc.NewHealthClient(cc)
  2674  	stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
  2675  	if err != nil {
  2676  		t.Fatalf("hc.Watch(_, %v) failed: %v", service, err)
  2677  	}
  2678  	return stream, cancel
  2679  }
  2680  
  2681  // healthWatchChecker is a helper function to verify that the next health
  2682  // status returned on the given stream matches the one passed in 'wantStatus'.
  2683  func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
  2684  	t.Helper()
  2685  	response, err := stream.Recv()
  2686  	if err != nil {
  2687  		t.Fatalf("stream.Recv() failed: %v", err)
  2688  	}
  2689  	if response.Status != wantStatus {
  2690  		t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus)
  2691  	}
  2692  }
  2693  
  2694  // TestHealthCheckSuccess invokes the unary Check() RPC on the health server in
  2695  // a successful case.
  2696  func (s) TestHealthCheckSuccess(t *testing.T) {
  2697  	for _, e := range listTestEnv() {
  2698  		testHealthCheckSuccess(t, e)
  2699  	}
  2700  }
  2701  
  2702  func testHealthCheckSuccess(t *testing.T, e env) {
  2703  	te := newTest(t, e)
  2704  	te.enableHealthServer = true
  2705  	te.startServer(&testServer{security: e.security})
  2706  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2707  	defer te.tearDown()
  2708  
  2709  	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK)
  2710  }
  2711  
  2712  // TestHealthCheckFailure invokes the unary Check() RPC on the health server
  2713  // with an expired context and expects the RPC to fail.
  2714  func (s) TestHealthCheckFailure(t *testing.T) {
  2715  	for _, e := range listTestEnv() {
  2716  		testHealthCheckFailure(t, e)
  2717  	}
  2718  }
  2719  
  2720  func testHealthCheckFailure(t *testing.T, e env) {
  2721  	te := newTest(t, e)
  2722  	te.declareLogNoise(
  2723  		"Failed to dial ",
  2724  		"grpc: the client connection is closing; please retry",
  2725  	)
  2726  	te.enableHealthServer = true
  2727  	te.startServer(&testServer{security: e.security})
  2728  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2729  	defer te.tearDown()
  2730  
  2731  	verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded)
  2732  	awaitNewConnLogOutput()
  2733  }
  2734  
  2735  // TestHealthCheckOff makes a unary Check() RPC on the health server where the
  2736  // health status of the defaultHealthService is not set, and therefore expects
  2737  // an error code 'codes.NotFound'.
  2738  func (s) TestHealthCheckOff(t *testing.T) {
  2739  	for _, e := range listTestEnv() {
  2740  		// TODO(bradfitz): Temporarily skip this env due to #619.
  2741  		if e.name == "handler-tls" {
  2742  			continue
  2743  		}
  2744  		testHealthCheckOff(t, e)
  2745  	}
  2746  }
  2747  
  2748  func testHealthCheckOff(t *testing.T, e env) {
  2749  	te := newTest(t, e)
  2750  	te.enableHealthServer = true
  2751  	te.startServer(&testServer{security: e.security})
  2752  	defer te.tearDown()
  2753  
  2754  	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound)
  2755  }
  2756  
  2757  // TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health
  2758  // server with multiple clients and expects the same status on both streams.
  2759  func (s) TestHealthWatchMultipleClients(t *testing.T) {
  2760  	for _, e := range listTestEnv() {
  2761  		testHealthWatchMultipleClients(t, e)
  2762  	}
  2763  }
  2764  
  2765  func testHealthWatchMultipleClients(t *testing.T, e env) {
  2766  	te := newTest(t, e)
  2767  	te.enableHealthServer = true
  2768  	te.startServer(&testServer{security: e.security})
  2769  	defer te.tearDown()
  2770  
  2771  	cc := te.clientConn()
  2772  	stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService)
  2773  	defer cf1()
  2774  	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2775  
  2776  	stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService)
  2777  	defer cf2()
  2778  	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2779  
  2780  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2781  	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING)
  2782  	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING)
  2783  }
  2784  
// TestHealthWatchSameStatus makes a streaming Watch() RPC on the health
// server and makes sure that the health status of the server is as expected
// after multiple calls to SetServingStatus with the same status.
func (s) TestHealthWatchSameStatus(t *testing.T) {
	for _, e := range listTestEnv() {
		testHealthWatchSameStatus(t, e)
	}
}
  2793  
  2794  func testHealthWatchSameStatus(t *testing.T, e env) {
  2795  	te := newTest(t, e)
  2796  	te.enableHealthServer = true
  2797  	te.startServer(&testServer{security: e.security})
  2798  	defer te.tearDown()
  2799  
  2800  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2801  	defer cf()
  2802  
  2803  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2804  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2805  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2806  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2807  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2808  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
  2809  }
  2810  
  2811  // TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server
  2812  // on which the health status for the defaultService is set before the gRPC
  2813  // server is started, and expects the correct health status to be returned.
  2814  func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) {
  2815  	for _, e := range listTestEnv() {
  2816  		testHealthWatchSetServiceStatusBeforeStartingServer(t, e)
  2817  	}
  2818  }
  2819  
  2820  func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) {
  2821  	hs := health.NewServer()
  2822  	te := newTest(t, e)
  2823  	te.healthServer = hs
  2824  	hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2825  	te.startServer(&testServer{security: e.security})
  2826  	defer te.tearDown()
  2827  
  2828  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2829  	defer cf()
  2830  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2831  }
  2832  
  2833  // TestHealthWatchDefaultStatusChange verifies the simple case where the
  2834  // service starts off with a SERVICE_UNKNOWN status (because SetServingStatus
  2835  // hasn't been called yet) and then moves to SERVING after SetServingStatus is
  2836  // called.
  2837  func (s) TestHealthWatchDefaultStatusChange(t *testing.T) {
  2838  	for _, e := range listTestEnv() {
  2839  		testHealthWatchDefaultStatusChange(t, e)
  2840  	}
  2841  }
  2842  
  2843  func testHealthWatchDefaultStatusChange(t *testing.T, e env) {
  2844  	te := newTest(t, e)
  2845  	te.enableHealthServer = true
  2846  	te.startServer(&testServer{security: e.security})
  2847  	defer te.tearDown()
  2848  
  2849  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2850  	defer cf()
  2851  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2852  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2853  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2854  }
  2855  
  2856  // TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case
  2857  // where the health status is set to SERVING before the client calls Watch().
  2858  func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) {
  2859  	for _, e := range listTestEnv() {
  2860  		testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e)
  2861  	}
  2862  }
  2863  
  2864  func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) {
  2865  	te := newTest(t, e)
  2866  	te.enableHealthServer = true
  2867  	te.startServer(&testServer{security: e.security})
  2868  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2869  	defer te.tearDown()
  2870  
  2871  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2872  	defer cf()
  2873  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2874  }
  2875  
  2876  // TestHealthWatchOverallServerHealthChange verifies setting the overall status
  2877  // of the server by using the empty service name.
  2878  func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) {
  2879  	for _, e := range listTestEnv() {
  2880  		testHealthWatchOverallServerHealthChange(t, e)
  2881  	}
  2882  }
  2883  
  2884  func testHealthWatchOverallServerHealthChange(t *testing.T, e env) {
  2885  	te := newTest(t, e)
  2886  	te.enableHealthServer = true
  2887  	te.startServer(&testServer{security: e.security})
  2888  	defer te.tearDown()
  2889  
  2890  	stream, cf := newHealthCheckStream(t, te.clientConn(), "")
  2891  	defer cf()
  2892  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2893  	te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
  2894  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
  2895  }
  2896  
  2897  // TestUnknownHandler verifies that an expected error is returned (by setting
  2898  // the unknownHandler on the server) for a service which is not exposed to the
  2899  // client.
  2900  func (s) TestUnknownHandler(t *testing.T) {
  2901  	// An example unknownHandler that returns a different code and a different
  2902  	// method, making sure that we do not expose what methods are implemented to
  2903  	// a client that is not authenticated.
  2904  	unknownHandler := func(srv interface{}, stream grpc.ServerStream) error {
  2905  		return status.Error(codes.Unauthenticated, "user unauthenticated")
  2906  	}
  2907  	for _, e := range listTestEnv() {
  2908  		// TODO(bradfitz): Temporarily skip this env due to #619.
  2909  		if e.name == "handler-tls" {
  2910  			continue
  2911  		}
  2912  		testUnknownHandler(t, e, unknownHandler)
  2913  	}
  2914  }
  2915  
  2916  func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) {
  2917  	te := newTest(t, e)
  2918  	te.unknownHandler = unknownHandler
  2919  	te.startServer(&testServer{security: e.security})
  2920  	defer te.tearDown()
  2921  	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated)
  2922  }
  2923  
  2924  // TestHealthCheckServingStatus makes a streaming Watch() RPC on the health
  2925  // server and verifies a bunch of health status transitions.
  2926  func (s) TestHealthCheckServingStatus(t *testing.T) {
  2927  	for _, e := range listTestEnv() {
  2928  		testHealthCheckServingStatus(t, e)
  2929  	}
  2930  }
  2931  
  2932  func testHealthCheckServingStatus(t *testing.T, e env) {
  2933  	te := newTest(t, e)
  2934  	te.enableHealthServer = true
  2935  	te.startServer(&testServer{security: e.security})
  2936  	defer te.tearDown()
  2937  
  2938  	cc := te.clientConn()
  2939  	verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING)
  2940  	verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound)
  2941  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2942  	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2943  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2944  	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2945  }
  2946  
  2947  func (s) TestEmptyUnaryWithUserAgent(t *testing.T) {
  2948  	for _, e := range listTestEnv() {
  2949  		testEmptyUnaryWithUserAgent(t, e)
  2950  	}
  2951  }
  2952  
  2953  func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
  2954  	te := newTest(t, e)
  2955  	te.userAgent = testAppUA
  2956  	te.startServer(&testServer{security: e.security})
  2957  	defer te.tearDown()
  2958  
  2959  	cc := te.clientConn()
  2960  	tc := testpb.NewTestServiceClient(cc)
  2961  	var header metadata.MD
  2962  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2963  	defer cancel()
  2964  	reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header))
  2965  	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
  2966  		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
  2967  	}
  2968  	if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
  2969  		t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
  2970  	}
  2971  
  2972  	te.srv.Stop()
  2973  }
  2974  
  2975  func (s) TestFailedEmptyUnary(t *testing.T) {
  2976  	for _, e := range listTestEnv() {
  2977  		if e.name == "handler-tls" {
  2978  			// This test covers status details, but
  2979  			// Grpc-Status-Details-Bin is not support in handler_server.
  2980  			continue
  2981  		}
  2982  		testFailedEmptyUnary(t, e)
  2983  	}
  2984  }
  2985  
  2986  func testFailedEmptyUnary(t *testing.T, e env) {
  2987  	te := newTest(t, e)
  2988  	te.userAgent = failAppUA
  2989  	te.startServer(&testServer{security: e.security})
  2990  	defer te.tearDown()
  2991  	tc := testpb.NewTestServiceClient(te.clientConn())
  2992  
  2993  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  2994  	wantErr := detailedError
  2995  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
  2996  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
  2997  	}
  2998  }
  2999  
  3000  func (s) TestLargeUnary(t *testing.T) {
  3001  	for _, e := range listTestEnv() {
  3002  		testLargeUnary(t, e)
  3003  	}
  3004  }
  3005  
  3006  func testLargeUnary(t *testing.T, e env) {
  3007  	te := newTest(t, e)
  3008  	te.startServer(&testServer{security: e.security})
  3009  	defer te.tearDown()
  3010  	tc := testpb.NewTestServiceClient(te.clientConn())
  3011  
  3012  	const argSize = 271828
  3013  	const respSize = 314159
  3014  
  3015  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3016  	if err != nil {
  3017  		t.Fatal(err)
  3018  	}
  3019  
  3020  	req := &testpb.SimpleRequest{
  3021  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3022  		ResponseSize: respSize,
  3023  		Payload:      payload,
  3024  	}
  3025  
  3026  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3027  	defer cancel()
  3028  	reply, err := tc.UnaryCall(ctx, req)
  3029  	if err != nil {
  3030  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  3031  	}
  3032  	pt := reply.GetPayload().GetType()
  3033  	ps := len(reply.GetPayload().GetBody())
  3034  	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
  3035  		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
  3036  	}
  3037  }
  3038  
  3039  // Test backward-compatibility API for setting msg size limit.
  3040  func (s) TestExceedMsgLimit(t *testing.T) {
  3041  	for _, e := range listTestEnv() {
  3042  		testExceedMsgLimit(t, e)
  3043  	}
  3044  }
  3045  
  3046  func testExceedMsgLimit(t *testing.T, e env) {
  3047  	te := newTest(t, e)
  3048  	maxMsgSize := 1024
  3049  	te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize)
  3050  	te.startServer(&testServer{security: e.security})
  3051  	defer te.tearDown()
  3052  	tc := testpb.NewTestServiceClient(te.clientConn())
  3053  
  3054  	largeSize := int32(maxMsgSize + 1)
  3055  	const smallSize = 1
  3056  
  3057  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  3058  	if err != nil {
  3059  		t.Fatal(err)
  3060  	}
  3061  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  3062  	if err != nil {
  3063  		t.Fatal(err)
  3064  	}
  3065  
  3066  	// Make sure the server cannot receive a unary RPC of largeSize.
  3067  	req := &testpb.SimpleRequest{
  3068  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3069  		ResponseSize: smallSize,
  3070  		Payload:      largePayload,
  3071  	}
  3072  
  3073  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3074  	defer cancel()
  3075  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  3076  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  3077  	}
  3078  	// Make sure the client cannot receive a unary RPC of largeSize.
  3079  	req.ResponseSize = largeSize
  3080  	req.Payload = smallPayload
  3081  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  3082  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  3083  	}
  3084  
  3085  	// Make sure the server cannot receive a streaming RPC of largeSize.
  3086  	stream, err := tc.FullDuplexCall(te.ctx)
  3087  	if err != nil {
  3088  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3089  	}
  3090  	respParam := []*testpb.ResponseParameters{
  3091  		{
  3092  			Size: 1,
  3093  		},
  3094  	}
  3095  
  3096  	sreq := &testpb.StreamingOutputCallRequest{
  3097  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3098  		ResponseParameters: respParam,
  3099  		Payload:            largePayload,
  3100  	}
  3101  	if err := stream.Send(sreq); err != nil {
  3102  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  3103  	}
  3104  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  3105  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  3106  	}
  3107  
  3108  	// Test on client side for streaming RPC.
  3109  	stream, err = tc.FullDuplexCall(te.ctx)
  3110  	if err != nil {
  3111  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3112  	}
  3113  	respParam[0].Size = largeSize
  3114  	sreq.Payload = smallPayload
  3115  	if err := stream.Send(sreq); err != nil {
  3116  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  3117  	}
  3118  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  3119  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  3120  	}
  3121  }
  3122  
  3123  func (s) TestPeerClientSide(t *testing.T) {
  3124  	for _, e := range listTestEnv() {
  3125  		testPeerClientSide(t, e)
  3126  	}
  3127  }
  3128  
  3129  func testPeerClientSide(t *testing.T, e env) {
  3130  	te := newTest(t, e)
  3131  	te.userAgent = testAppUA
  3132  	te.startServer(&testServer{security: e.security})
  3133  	defer te.tearDown()
  3134  	tc := testpb.NewTestServiceClient(te.clientConn())
  3135  	pr := new(peer.Peer)
  3136  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3137  	defer cancel()
  3138  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(pr), grpc.WaitForReady(true)); err != nil {
  3139  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  3140  	}
  3141  	pa := pr.Addr.String()
  3142  	if e.network == "unix" {
  3143  		if pa != te.srvAddr {
  3144  			t.Fatalf("pr.Addr = %v, want %v", pa, te.srvAddr)
  3145  		}
  3146  		return
  3147  	}
  3148  	_, pp, err := net.SplitHostPort(pa)
  3149  	if err != nil {
  3150  		t.Fatalf("Failed to parse address from pr.")
  3151  	}
  3152  	_, sp, err := net.SplitHostPort(te.srvAddr)
  3153  	if err != nil {
  3154  		t.Fatalf("Failed to parse address of test server.")
  3155  	}
  3156  	if pp != sp {
  3157  		t.Fatalf("pr.Addr = localhost:%v, want localhost:%v", pp, sp)
  3158  	}
  3159  }
  3160  
  3161  // TestPeerNegative tests that if call fails setting peer
  3162  // doesn't cause a segmentation fault.
  3163  // issue#1141 https://github.com/grpc/grpc-go/issues/1141
  3164  func (s) TestPeerNegative(t *testing.T) {
  3165  	for _, e := range listTestEnv() {
  3166  		testPeerNegative(t, e)
  3167  	}
  3168  }
  3169  
  3170  func testPeerNegative(t *testing.T, e env) {
  3171  	te := newTest(t, e)
  3172  	te.startServer(&testServer{security: e.security})
  3173  	defer te.tearDown()
  3174  
  3175  	cc := te.clientConn()
  3176  	tc := testpb.NewTestServiceClient(cc)
  3177  	pr := new(peer.Peer)
  3178  	ctx, cancel := context.WithCancel(context.Background())
  3179  	cancel()
  3180  	_, _ = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(pr))
  3181  }
  3182  
  3183  func (s) TestPeerFailedRPC(t *testing.T) {
  3184  	for _, e := range listTestEnv() {
  3185  		testPeerFailedRPC(t, e)
  3186  	}
  3187  }
  3188  
  3189  func testPeerFailedRPC(t *testing.T, e env) {
  3190  	te := newTest(t, e)
  3191  	te.maxServerReceiveMsgSize = newInt(1 * 1024)
  3192  	te.startServer(&testServer{security: e.security})
  3193  
  3194  	defer te.tearDown()
  3195  	tc := testpb.NewTestServiceClient(te.clientConn())
  3196  
  3197  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3198  	defer cancel()
  3199  	// first make a successful request to the server
  3200  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  3201  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  3202  	}
  3203  
  3204  	// make a second request that will be rejected by the server
  3205  	const largeSize = 5 * 1024
  3206  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  3207  	if err != nil {
  3208  		t.Fatal(err)
  3209  	}
  3210  	req := &testpb.SimpleRequest{
  3211  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3212  		Payload:      largePayload,
  3213  	}
  3214  
  3215  	pr := new(peer.Peer)
  3216  	if _, err := tc.UnaryCall(ctx, req, grpc.Peer(pr)); err == nil || status.Code(err) != codes.ResourceExhausted {
  3217  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  3218  	} else {
  3219  		pa := pr.Addr.String()
  3220  		if e.network == "unix" {
  3221  			if pa != te.srvAddr {
  3222  				t.Fatalf("pr.Addr = %v, want %v", pa, te.srvAddr)
  3223  			}
  3224  			return
  3225  		}
  3226  		_, pp, err := net.SplitHostPort(pa)
  3227  		if err != nil {
  3228  			t.Fatalf("Failed to parse address from pr.")
  3229  		}
  3230  		_, sp, err := net.SplitHostPort(te.srvAddr)
  3231  		if err != nil {
  3232  			t.Fatalf("Failed to parse address of test server.")
  3233  		}
  3234  		if pp != sp {
  3235  			t.Fatalf("pr.Addr = localhost:%v, want localhost:%v", pp, sp)
  3236  		}
  3237  	}
  3238  }
  3239  
  3240  func (s) TestMetadataUnaryRPC(t *testing.T) {
  3241  	for _, e := range listTestEnv() {
  3242  		testMetadataUnaryRPC(t, e)
  3243  	}
  3244  }
  3245  
  3246  func testMetadataUnaryRPC(t *testing.T, e env) {
  3247  	te := newTest(t, e)
  3248  	te.startServer(&testServer{security: e.security})
  3249  	defer te.tearDown()
  3250  	tc := testpb.NewTestServiceClient(te.clientConn())
  3251  
  3252  	const argSize = 2718
  3253  	const respSize = 314
  3254  
  3255  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3256  	if err != nil {
  3257  		t.Fatal(err)
  3258  	}
  3259  
  3260  	req := &testpb.SimpleRequest{
  3261  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3262  		ResponseSize: respSize,
  3263  		Payload:      payload,
  3264  	}
  3265  	var header, trailer metadata.MD
  3266  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3267  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
  3268  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3269  	}
  3270  	// Ignore optional response headers that Servers may set:
  3271  	if header != nil {
  3272  		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
  3273  		delete(header, "date")    // the Date header is also optional
  3274  		delete(header, "user-agent")
  3275  		delete(header, "content-type")
  3276  	}
  3277  	if !reflect.DeepEqual(header, testMetadata) {
  3278  		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
  3279  	}
  3280  	if !reflect.DeepEqual(trailer, testTrailerMetadata) {
  3281  		t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
  3282  	}
  3283  }
  3284  
  3285  func (s) TestMetadataOrderUnaryRPC(t *testing.T) {
  3286  	for _, e := range listTestEnv() {
  3287  		testMetadataOrderUnaryRPC(t, e)
  3288  	}
  3289  }
  3290  
  3291  func testMetadataOrderUnaryRPC(t *testing.T, e env) {
  3292  	te := newTest(t, e)
  3293  	te.startServer(&testServer{security: e.security})
  3294  	defer te.tearDown()
  3295  	tc := testpb.NewTestServiceClient(te.clientConn())
  3296  
  3297  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3298  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
  3299  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")
  3300  
  3301  	// using Join to built expected metadata instead of FromOutgoingContext
  3302  	newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3"))
  3303  
  3304  	var header metadata.MD
  3305  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil {
  3306  		t.Fatal(err)
  3307  	}
  3308  
  3309  	// Ignore optional response headers that Servers may set:
  3310  	if header != nil {
  3311  		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
  3312  		delete(header, "date")    // the Date header is also optional
  3313  		delete(header, "user-agent")
  3314  		delete(header, "content-type")
  3315  	}
  3316  
  3317  	if !reflect.DeepEqual(header, newMetadata) {
  3318  		t.Fatalf("Received header metadata %v, want %v", header, newMetadata)
  3319  	}
  3320  }
  3321  
  3322  func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) {
  3323  	for _, e := range listTestEnv() {
  3324  		testMultipleSetTrailerUnaryRPC(t, e)
  3325  	}
  3326  }
  3327  
  3328  func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
  3329  	te := newTest(t, e)
  3330  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  3331  	defer te.tearDown()
  3332  	tc := testpb.NewTestServiceClient(te.clientConn())
  3333  
  3334  	const (
  3335  		argSize  = 1
  3336  		respSize = 1
  3337  	)
  3338  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3339  	if err != nil {
  3340  		t.Fatal(err)
  3341  	}
  3342  
  3343  	req := &testpb.SimpleRequest{
  3344  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3345  		ResponseSize: respSize,
  3346  		Payload:      payload,
  3347  	}
  3348  	var trailer metadata.MD
  3349  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3350  	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
  3351  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3352  	}
  3353  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  3354  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  3355  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  3356  	}
  3357  }
  3358  
  3359  func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) {
  3360  	for _, e := range listTestEnv() {
  3361  		testMultipleSetTrailerStreamingRPC(t, e)
  3362  	}
  3363  }
  3364  
  3365  func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
  3366  	te := newTest(t, e)
  3367  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  3368  	defer te.tearDown()
  3369  	tc := testpb.NewTestServiceClient(te.clientConn())
  3370  
  3371  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3372  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  3373  	if err != nil {
  3374  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3375  	}
  3376  	if err := stream.CloseSend(); err != nil {
  3377  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3378  	}
  3379  	if _, err := stream.Recv(); err != io.EOF {
  3380  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  3381  	}
  3382  
  3383  	trailer := stream.Trailer()
  3384  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  3385  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  3386  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  3387  	}
  3388  }
  3389  
  3390  func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) {
  3391  	for _, e := range listTestEnv() {
  3392  		if e.name == "handler-tls" {
  3393  			continue
  3394  		}
  3395  		testSetAndSendHeaderUnaryRPC(t, e)
  3396  	}
  3397  }
  3398  
  3399  // To test header metadata is sent on SendHeader().
  3400  func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
  3401  	te := newTest(t, e)
  3402  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  3403  	defer te.tearDown()
  3404  	tc := testpb.NewTestServiceClient(te.clientConn())
  3405  
  3406  	const (
  3407  		argSize  = 1
  3408  		respSize = 1
  3409  	)
  3410  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3411  	if err != nil {
  3412  		t.Fatal(err)
  3413  	}
  3414  
  3415  	req := &testpb.SimpleRequest{
  3416  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3417  		ResponseSize: respSize,
  3418  		Payload:      payload,
  3419  	}
  3420  	var header metadata.MD
  3421  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3422  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  3423  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3424  	}
  3425  	delete(header, "user-agent")
  3426  	delete(header, "content-type")
  3427  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3428  	if !reflect.DeepEqual(header, expectedHeader) {
  3429  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3430  	}
  3431  }
  3432  
  3433  func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) {
  3434  	for _, e := range listTestEnv() {
  3435  		if e.name == "handler-tls" {
  3436  			continue
  3437  		}
  3438  		testMultipleSetHeaderUnaryRPC(t, e)
  3439  	}
  3440  }
  3441  
  3442  // To test header metadata is sent when sending response.
  3443  func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
  3444  	te := newTest(t, e)
  3445  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3446  	defer te.tearDown()
  3447  	tc := testpb.NewTestServiceClient(te.clientConn())
  3448  
  3449  	const (
  3450  		argSize  = 1
  3451  		respSize = 1
  3452  	)
  3453  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3454  	if err != nil {
  3455  		t.Fatal(err)
  3456  	}
  3457  
  3458  	req := &testpb.SimpleRequest{
  3459  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3460  		ResponseSize: respSize,
  3461  		Payload:      payload,
  3462  	}
  3463  
  3464  	var header metadata.MD
  3465  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3466  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  3467  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3468  	}
  3469  	delete(header, "user-agent")
  3470  	delete(header, "content-type")
  3471  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3472  	if !reflect.DeepEqual(header, expectedHeader) {
  3473  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3474  	}
  3475  }
  3476  
  3477  func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) {
  3478  	for _, e := range listTestEnv() {
  3479  		if e.name == "handler-tls" {
  3480  			continue
  3481  		}
  3482  		testMultipleSetHeaderUnaryRPCError(t, e)
  3483  	}
  3484  }
  3485  
  3486  // To test header metadata is sent when sending status.
  3487  func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
  3488  	te := newTest(t, e)
  3489  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3490  	defer te.tearDown()
  3491  	tc := testpb.NewTestServiceClient(te.clientConn())
  3492  
  3493  	const (
  3494  		argSize  = 1
  3495  		respSize = -1 // Invalid respSize to make RPC fail.
  3496  	)
  3497  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3498  	if err != nil {
  3499  		t.Fatal(err)
  3500  	}
  3501  
  3502  	req := &testpb.SimpleRequest{
  3503  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3504  		ResponseSize: respSize,
  3505  		Payload:      payload,
  3506  	}
  3507  	var header metadata.MD
  3508  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3509  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
  3510  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
  3511  	}
  3512  	delete(header, "user-agent")
  3513  	delete(header, "content-type")
  3514  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3515  	if !reflect.DeepEqual(header, expectedHeader) {
  3516  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3517  	}
  3518  }
  3519  
  3520  func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) {
  3521  	for _, e := range listTestEnv() {
  3522  		if e.name == "handler-tls" {
  3523  			continue
  3524  		}
  3525  		testSetAndSendHeaderStreamingRPC(t, e)
  3526  	}
  3527  }
  3528  
  3529  // To test header metadata is sent on SendHeader().
  3530  func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
  3531  	te := newTest(t, e)
  3532  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  3533  	defer te.tearDown()
  3534  	tc := testpb.NewTestServiceClient(te.clientConn())
  3535  
  3536  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3537  	stream, err := tc.FullDuplexCall(ctx)
  3538  	if err != nil {
  3539  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3540  	}
  3541  	if err := stream.CloseSend(); err != nil {
  3542  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3543  	}
  3544  	if _, err := stream.Recv(); err != io.EOF {
  3545  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  3546  	}
  3547  
  3548  	header, err := stream.Header()
  3549  	if err != nil {
  3550  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  3551  	}
  3552  	delete(header, "user-agent")
  3553  	delete(header, "content-type")
  3554  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3555  	if !reflect.DeepEqual(header, expectedHeader) {
  3556  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3557  	}
  3558  }
  3559  
  3560  func (s) TestMultipleSetHeaderStreamingRPC(t *testing.T) {
  3561  	for _, e := range listTestEnv() {
  3562  		if e.name == "handler-tls" {
  3563  			continue
  3564  		}
  3565  		testMultipleSetHeaderStreamingRPC(t, e)
  3566  	}
  3567  }
  3568  
  3569  // To test header metadata is sent when sending response.
  3570  func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
  3571  	te := newTest(t, e)
  3572  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3573  	defer te.tearDown()
  3574  	tc := testpb.NewTestServiceClient(te.clientConn())
  3575  
  3576  	const (
  3577  		argSize  = 1
  3578  		respSize = 1
  3579  	)
  3580  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3581  	stream, err := tc.FullDuplexCall(ctx)
  3582  	if err != nil {
  3583  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3584  	}
  3585  
  3586  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3587  	if err != nil {
  3588  		t.Fatal(err)
  3589  	}
  3590  
  3591  	req := &testpb.StreamingOutputCallRequest{
  3592  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3593  		ResponseParameters: []*testpb.ResponseParameters{
  3594  			{Size: respSize},
  3595  		},
  3596  		Payload: payload,
  3597  	}
  3598  	if err := stream.Send(req); err != nil {
  3599  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3600  	}
  3601  	if _, err := stream.Recv(); err != nil {
  3602  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  3603  	}
  3604  	if err := stream.CloseSend(); err != nil {
  3605  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3606  	}
  3607  	if _, err := stream.Recv(); err != io.EOF {
  3608  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  3609  	}
  3610  
  3611  	header, err := stream.Header()
  3612  	if err != nil {
  3613  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  3614  	}
  3615  	delete(header, "user-agent")
  3616  	delete(header, "content-type")
  3617  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3618  	if !reflect.DeepEqual(header, expectedHeader) {
  3619  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3620  	}
  3621  
  3622  }
  3623  
  3624  func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) {
  3625  	for _, e := range listTestEnv() {
  3626  		if e.name == "handler-tls" {
  3627  			continue
  3628  		}
  3629  		testMultipleSetHeaderStreamingRPCError(t, e)
  3630  	}
  3631  }
  3632  
  3633  // To test header metadata is sent when sending status.
  3634  func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
  3635  	te := newTest(t, e)
  3636  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3637  	defer te.tearDown()
  3638  	tc := testpb.NewTestServiceClient(te.clientConn())
  3639  
  3640  	const (
  3641  		argSize  = 1
  3642  		respSize = -1
  3643  	)
  3644  	ctx, cancel := context.WithCancel(context.Background())
  3645  	defer cancel()
  3646  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  3647  	stream, err := tc.FullDuplexCall(ctx)
  3648  	if err != nil {
  3649  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3650  	}
  3651  
  3652  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3653  	if err != nil {
  3654  		t.Fatal(err)
  3655  	}
  3656  
  3657  	req := &testpb.StreamingOutputCallRequest{
  3658  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3659  		ResponseParameters: []*testpb.ResponseParameters{
  3660  			{Size: respSize},
  3661  		},
  3662  		Payload: payload,
  3663  	}
  3664  	if err := stream.Send(req); err != nil {
  3665  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3666  	}
  3667  	if _, err := stream.Recv(); err == nil {
  3668  		t.Fatalf("%v.Recv() = %v, want <non-nil>", stream, err)
  3669  	}
  3670  
  3671  	header, err := stream.Header()
  3672  	if err != nil {
  3673  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  3674  	}
  3675  	delete(header, "user-agent")
  3676  	delete(header, "content-type")
  3677  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3678  	if !reflect.DeepEqual(header, expectedHeader) {
  3679  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3680  	}
  3681  	if err := stream.CloseSend(); err != nil {
  3682  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3683  	}
  3684  }
  3685  
// TestMalformedHTTP2Metadata verifies the returned error when the client
// sends illegal metadata.
func (s) TestMalformedHTTP2Metadata(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// Failed with "server stops accepting new RPCs".
			// Server stops accepting new RPCs when the client sends an illegal http2 header.
			continue
		}
		testMalformedHTTP2Metadata(t, e)
	}
}
  3698  
  3699  func testMalformedHTTP2Metadata(t *testing.T, e env) {
  3700  	te := newTest(t, e)
  3701  	te.startServer(&testServer{security: e.security})
  3702  	defer te.tearDown()
  3703  	tc := testpb.NewTestServiceClient(te.clientConn())
  3704  
  3705  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
  3706  	if err != nil {
  3707  		t.Fatal(err)
  3708  	}
  3709  
  3710  	req := &testpb.SimpleRequest{
  3711  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3712  		ResponseSize: 314,
  3713  		Payload:      payload,
  3714  	}
  3715  	ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata)
  3716  	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
  3717  		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
  3718  	}
  3719  }
  3720  
// Tests that the client transparently retries correctly when receiving a
// RST_STREAM with code REFUSED_STREAM.
func (s) TestTransparentRetry(t *testing.T) {
	// The case comments record the HTTP/2 stream IDs each case is expected to
	// consume (client stream IDs are odd: 1, 3, 5, ...). They must stay in
	// sync with the refuseStream callback below, which decides by stream ID
	// whether an attempt is accepted or refused (triggering a retry).
	// NOTE(review): failFast is recorded per case but not referenced by the
	// call loop below — confirm whether it was meant to set a call option.
	testCases := []struct {
		failFast bool
		errCode  codes.Code
	}{{
		// success attempt: 1, (stream ID 1)
	}, {
		// success attempt: 2, (stream IDs 3, 5)
	}, {
		// no success attempt (stream IDs 7, 9)
		errCode: codes.Unavailable,
	}, {
		// success attempt: 1 (stream ID 11),
		failFast: true,
	}, {
		// success attempt: 2 (stream IDs 13, 15),
		failFast: true,
	}, {
		// no success attempt (stream IDs 17, 19)
		failFast: true,
		errCode:  codes.Unavailable,
	}}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen. Err: %v", err)
	}
	defer func(lis net.Listener) {
		_ = lis.Close()
	}(lis)
	// A hand-rolled HTTP/2 test server: every accepted stream is answered
	// with trailers-only grpc-status 0; all other stream IDs are refused,
	// exercising the client's transparent-retry path.
	server := &httpServer{
		responses: []httpServerResponse{{
			trailers: [][]string{{
				":status", "200",
				"content-type", "application/grpc",
				"grpc-status", "0",
			}},
		}},
		refuseStream: func(i uint32) bool {
			switch i {
			case 1, 5, 11, 15: // these stream IDs succeed
				return false
			}
			return true // these are refused
		},
	}
	server.start(t, lis)
	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
	//cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("failed to dial due to err: %v", err)
	}
	defer func(cc *grpc.ClientConn) {
		_ = cc.Close()
	}(cc)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client := testpb.NewTestServiceClient(cc)

	for i, tc := range testCases {
		stream, err := client.FullDuplexCall(ctx)
		if err != nil {
			t.Fatalf("error creating stream due to err: %v", err)
		}
		// Normalize a clean end-of-stream (io.EOF) to codes.OK so it can be
		// compared directly against the expected error code.
		code := func(err error) codes.Code {
			if err == io.EOF {
				return codes.OK
			}
			return status.Code(err)
		}
		if _, err := stream.Recv(); code(err) != tc.errCode {
			t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode)
		}

	}
}
  3802  
  3803  func (s) TestCancel(t *testing.T) {
  3804  	for _, e := range listTestEnv() {
  3805  		testCancel(t, e)
  3806  	}
  3807  }
  3808  
  3809  func testCancel(t *testing.T, e env) {
  3810  	te := newTest(t, e)
  3811  	te.declareLogNoise("grpc: the client connection is closing; please retry")
  3812  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second})
  3813  	defer te.tearDown()
  3814  
  3815  	cc := te.clientConn()
  3816  	tc := testpb.NewTestServiceClient(cc)
  3817  
  3818  	const argSize = 2718
  3819  	const respSize = 314
  3820  
  3821  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3822  	if err != nil {
  3823  		t.Fatal(err)
  3824  	}
  3825  
  3826  	req := &testpb.SimpleRequest{
  3827  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3828  		ResponseSize: respSize,
  3829  		Payload:      payload,
  3830  	}
  3831  	ctx, cancel := context.WithCancel(context.Background())
  3832  	time.AfterFunc(1*time.Millisecond, cancel)
  3833  	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
  3834  		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
  3835  	}
  3836  	awaitNewConnLogOutput()
  3837  }
  3838  
  3839  func (s) TestCancelNoIO(t *testing.T) {
  3840  	for _, e := range listTestEnv() {
  3841  		testCancelNoIO(t, e)
  3842  	}
  3843  }
  3844  
// testCancelNoIO verifies that canceling an RPC which never performed I/O
// frees its stream slot: with MaxConcurrentStreams=1, a blocked second RPC
// must proceed once the first (idle) RPC is canceled.
func testCancelNoIO(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken")
	te.maxStream = 1 // Only allows 1 live stream per server transport.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	// Start one blocked RPC for which we'll never send streaming
	// input. This will consume the 1 maximum concurrent streams,
	// causing future RPCs to hang.
	ctx, cancelFirst := context.WithCancel(context.Background())
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}

	// Loop until the ClientConn receives the initial settings
	// frame from the server, notifying it about the maximum
	// concurrent streams. We know when it's received it because
	// an RPC will fail with codes.DeadlineExceeded instead of
	// succeeding.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	for {
		ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond)
		_, err := tc.StreamingInputCall(ctx)
		cancelSecond()
		if err == nil {
			// Settings not yet applied; the stream was admitted. Try again.
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
	}
	// If there are any RPCs in flight before the client receives
	// the max streams setting, let them be expired.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	time.Sleep(50 * time.Millisecond)

	// Cancel the first (slot-holding) RPC shortly after the third one below
	// has started waiting for a free stream.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancelFirst()
	}()

	// This should be blocked until the 1st is canceled, then succeed.
	ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond)
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	cancelThird()
}
  3899  
// The following tests the gRPC streaming RPC implementations.
// TODO(zhaoq): Have better coverage on error cases.
var (
	// Paired index-wise: reqSizes[i] is the request payload size and
	// respSizes[i] the requested response size for ping-pong iteration i.
	reqSizes  = []int{27182, 8, 1828, 45904}
	respSizes = []int{31415, 9, 2653, 58979}
)
  3906  
  3907  func (s) TestNoService(t *testing.T) {
  3908  	for _, e := range listTestEnv() {
  3909  		testNoService(t, e)
  3910  	}
  3911  }
  3912  
  3913  func testNoService(t *testing.T, e env) {
  3914  	te := newTest(t, e)
  3915  	te.startServer(nil)
  3916  	defer te.tearDown()
  3917  
  3918  	cc := te.clientConn()
  3919  	tc := testpb.NewTestServiceClient(cc)
  3920  
  3921  	stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true))
  3922  	if err != nil {
  3923  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3924  	}
  3925  	if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented {
  3926  		t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented)
  3927  	}
  3928  }
  3929  
  3930  func (s) TestPingPong(t *testing.T) {
  3931  	for _, e := range listTestEnv() {
  3932  		testPingPong(t, e)
  3933  	}
  3934  }
  3935  
  3936  func testPingPong(t *testing.T, e env) {
  3937  	te := newTest(t, e)
  3938  	te.startServer(&testServer{security: e.security})
  3939  	defer te.tearDown()
  3940  	tc := testpb.NewTestServiceClient(te.clientConn())
  3941  
  3942  	stream, err := tc.FullDuplexCall(te.ctx)
  3943  	if err != nil {
  3944  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3945  	}
  3946  	var index int
  3947  	for index < len(reqSizes) {
  3948  		respParam := []*testpb.ResponseParameters{
  3949  			{
  3950  				Size: int32(respSizes[index]),
  3951  			},
  3952  		}
  3953  
  3954  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  3955  		if err != nil {
  3956  			t.Fatal(err)
  3957  		}
  3958  
  3959  		req := &testpb.StreamingOutputCallRequest{
  3960  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3961  			ResponseParameters: respParam,
  3962  			Payload:            payload,
  3963  		}
  3964  		if err := stream.Send(req); err != nil {
  3965  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3966  		}
  3967  		reply, err := stream.Recv()
  3968  		if err != nil {
  3969  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  3970  		}
  3971  		pt := reply.GetPayload().GetType()
  3972  		if pt != testpb.PayloadType_COMPRESSABLE {
  3973  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  3974  		}
  3975  		size := len(reply.GetPayload().GetBody())
  3976  		if size != respSizes[index] {
  3977  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  3978  		}
  3979  		index++
  3980  	}
  3981  	if err := stream.CloseSend(); err != nil {
  3982  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3983  	}
  3984  	if _, err := stream.Recv(); err != io.EOF {
  3985  		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
  3986  	}
  3987  }
  3988  
  3989  func (s) TestMetadataStreamingRPC(t *testing.T) {
  3990  	for _, e := range listTestEnv() {
  3991  		testMetadataStreamingRPC(t, e)
  3992  	}
  3993  }
  3994  
// testMetadataStreamingRPC verifies that metadata attached to the
// outgoing context is echoed back as stream header metadata (both the
// first read and the cached value), and that trailer metadata is
// delivered once the stream completes.
func testMetadataStreamingRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// The sender runs in its own goroutine; the main goroutine drains
	// responses below until the stream ends.
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			// TLS transports add this key; drop it before comparing.
			delete(headerMD, "transport_security_type")
		}
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// test the cached value.
		headerMD, err = stream.Header()
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Send one request per entry in reqSizes; any failure is
		// reported after CloseSend so the stream is always closed.
		err = func() error {
			for index := 0; index < len(reqSizes); index++ {
				respParam := []*testpb.ResponseParameters{
					{
						Size: int32(respSizes[index]),
					},
				}

				payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
				if err != nil {
					return err
				}

				req := &testpb.StreamingOutputCallRequest{
					ResponseType:       testpb.PayloadType_COMPRESSABLE,
					ResponseParameters: respParam,
					Payload:            payload,
				}
				if err := stream.Send(req); err != nil {
					return fmt.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				}
			}
			return nil
		}()
		// Tell the server we're done sending args.
		_ = stream.CloseSend()
		if err != nil {
			t.Error(err)
		}
	}()
	// Drain all responses; the loop exits on io.EOF or any error.
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	// Trailer is only valid after Recv has returned a non-nil error.
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testTrailerMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata)
	}
}
  4065  
  4066  func (s) TestServerStreaming(t *testing.T) {
  4067  	for _, e := range listTestEnv() {
  4068  		testServerStreaming(t, e)
  4069  	}
  4070  }
  4071  
  4072  func testServerStreaming(t *testing.T, e env) {
  4073  	te := newTest(t, e)
  4074  	te.startServer(&testServer{security: e.security})
  4075  	defer te.tearDown()
  4076  	tc := testpb.NewTestServiceClient(te.clientConn())
  4077  
  4078  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  4079  	for i, s := range respSizes {
  4080  		respParam[i] = &testpb.ResponseParameters{
  4081  			Size: int32(s),
  4082  		}
  4083  	}
  4084  	req := &testpb.StreamingOutputCallRequest{
  4085  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4086  		ResponseParameters: respParam,
  4087  	}
  4088  
  4089  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4090  	defer cancel()
  4091  	stream, err := tc.StreamingOutputCall(ctx, req)
  4092  	if err != nil {
  4093  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  4094  	}
  4095  	var rpcStatus error
  4096  	var respCnt int
  4097  	var index int
  4098  	for {
  4099  		reply, err := stream.Recv()
  4100  		if err != nil {
  4101  			rpcStatus = err
  4102  			break
  4103  		}
  4104  		pt := reply.GetPayload().GetType()
  4105  		if pt != testpb.PayloadType_COMPRESSABLE {
  4106  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  4107  		}
  4108  		size := len(reply.GetPayload().GetBody())
  4109  		if size != respSizes[index] {
  4110  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  4111  		}
  4112  		index++
  4113  		respCnt++
  4114  	}
  4115  	if rpcStatus != io.EOF {
  4116  		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
  4117  	}
  4118  	if respCnt != len(respSizes) {
  4119  		t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
  4120  	}
  4121  }
  4122  
  4123  func (s) TestFailedServerStreaming(t *testing.T) {
  4124  	for _, e := range listTestEnv() {
  4125  		testFailedServerStreaming(t, e)
  4126  	}
  4127  }
  4128  
  4129  func testFailedServerStreaming(t *testing.T, e env) {
  4130  	te := newTest(t, e)
  4131  	te.userAgent = failAppUA
  4132  	te.startServer(&testServer{security: e.security})
  4133  	defer te.tearDown()
  4134  	tc := testpb.NewTestServiceClient(te.clientConn())
  4135  
  4136  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  4137  	for i, s := range respSizes {
  4138  		respParam[i] = &testpb.ResponseParameters{
  4139  			Size: int32(s),
  4140  		}
  4141  	}
  4142  	req := &testpb.StreamingOutputCallRequest{
  4143  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4144  		ResponseParameters: respParam,
  4145  	}
  4146  	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
  4147  	stream, err := tc.StreamingOutputCall(ctx, req)
  4148  	if err != nil {
  4149  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  4150  	}
  4151  	wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA)
  4152  	if _, err := stream.Recv(); !equalError(err, wantErr) {
  4153  		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr)
  4154  	}
  4155  }
  4156  
  4157  func equalError(x, y error) bool {
  4158  	return errors.Is(x, y) || (x != nil && y != nil && x.Error() == y.Error())
  4159  }
  4160  
// concurrentSendServer is a TestServiceServer whose
// StreamingOutputCall makes ten serial Send calls, sending payloads
// "0".."9", inclusive.  TestServerStreamingConcurrent verifies they
// were received in the correct order, and that there were no races.
//
// All other TestServiceServer methods crash if called.
type concurrentSendServer struct {
	testpb.TestServiceServer // embedded to satisfy the full service interface
}
  4170  
  4171  //goland:noinspection GoUnusedParameter
  4172  func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
  4173  	for i := 0; i < 10; i++ {
  4174  		_ = stream.Send(&testpb.StreamingOutputCallResponse{
  4175  			Payload: &testpb.Payload{
  4176  				Body: []byte{'0' + uint8(i)},
  4177  			},
  4178  		})
  4179  	}
  4180  	return nil
  4181  }
  4182  
  4183  // Tests doing a bunch of concurrent streaming output calls.
  4184  func (s) TestServerStreamingConcurrent(t *testing.T) {
  4185  	for _, e := range listTestEnv() {
  4186  		testServerStreamingConcurrent(t, e)
  4187  	}
  4188  }
  4189  
  4190  func testServerStreamingConcurrent(t *testing.T, e env) {
  4191  	te := newTest(t, e)
  4192  	te.startServer(concurrentSendServer{})
  4193  	defer te.tearDown()
  4194  
  4195  	cc := te.clientConn()
  4196  	tc := testpb.NewTestServiceClient(cc)
  4197  
  4198  	doStreamingCall := func() {
  4199  		req := &testpb.StreamingOutputCallRequest{}
  4200  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4201  		defer cancel()
  4202  		stream, err := tc.StreamingOutputCall(ctx, req)
  4203  		if err != nil {
  4204  			t.Errorf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  4205  			return
  4206  		}
  4207  		var ngot int
  4208  		var buf bytes.Buffer
  4209  		for {
  4210  			reply, err := stream.Recv()
  4211  			if err == io.EOF {
  4212  				break
  4213  			}
  4214  			if err != nil {
  4215  				t.Fatal(err)
  4216  			}
  4217  			ngot++
  4218  			if buf.Len() > 0 {
  4219  				buf.WriteByte(',')
  4220  			}
  4221  			buf.Write(reply.GetPayload().GetBody())
  4222  		}
  4223  		if want := 10; ngot != want {
  4224  			t.Errorf("Got %d replies, want %d", ngot, want)
  4225  		}
  4226  		if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  4227  			t.Errorf("Got replies %q; want %q", got, want)
  4228  		}
  4229  	}
  4230  
  4231  	var wg sync.WaitGroup
  4232  	for i := 0; i < 20; i++ {
  4233  		wg.Add(1)
  4234  		go func() {
  4235  			defer wg.Done()
  4236  			doStreamingCall()
  4237  		}()
  4238  	}
  4239  	wg.Wait()
  4240  
  4241  }
  4242  
  4243  func generatePayloadSizes() [][]int {
  4244  	reqSizes := [][]int{
  4245  		{27182, 8, 1828, 45904},
  4246  	}
  4247  
  4248  	num8KPayloads := 1024
  4249  	var eightKPayloads []int
  4250  	for i := 0; i < num8KPayloads; i++ {
  4251  		eightKPayloads = append(eightKPayloads, 1<<13)
  4252  	}
  4253  	reqSizes = append(reqSizes, eightKPayloads)
  4254  
  4255  	num2MPayloads := 8
  4256  	var twoMPayloads []int
  4257  	for i := 0; i < num2MPayloads; i++ {
  4258  		twoMPayloads = append(twoMPayloads, 1<<21)
  4259  	}
  4260  	reqSizes = append(reqSizes, twoMPayloads)
  4261  
  4262  	return reqSizes
  4263  }
  4264  
  4265  func (s) TestClientStreaming(t *testing.T) {
  4266  	for _, s := range generatePayloadSizes() {
  4267  		for _, e := range listTestEnv() {
  4268  			testClientStreaming(t, e, s)
  4269  		}
  4270  	}
  4271  }
  4272  
  4273  func testClientStreaming(t *testing.T, e env, sizes []int) {
  4274  	te := newTest(t, e)
  4275  	te.startServer(&testServer{security: e.security})
  4276  	defer te.tearDown()
  4277  	tc := testpb.NewTestServiceClient(te.clientConn())
  4278  
  4279  	ctx, cancel := context.WithTimeout(te.ctx, time.Second*30)
  4280  	defer cancel()
  4281  	stream, err := tc.StreamingInputCall(ctx)
  4282  	if err != nil {
  4283  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
  4284  	}
  4285  
  4286  	var sum int
  4287  	for _, s := range sizes {
  4288  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
  4289  		if err != nil {
  4290  			t.Fatal(err)
  4291  		}
  4292  
  4293  		req := &testpb.StreamingInputCallRequest{
  4294  			Payload: payload,
  4295  		}
  4296  		if err := stream.Send(req); err != nil {
  4297  			t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
  4298  		}
  4299  		sum += s
  4300  	}
  4301  	reply, err := stream.CloseAndRecv()
  4302  	if err != nil {
  4303  		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
  4304  	}
  4305  	if reply.GetAggregatedPayloadSize() != int32(sum) {
  4306  		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
  4307  	}
  4308  }
  4309  
  4310  func (s) TestClientStreamingError(t *testing.T) {
  4311  	for _, e := range listTestEnv() {
  4312  		if e.name == "handler-tls" {
  4313  			continue
  4314  		}
  4315  		testClientStreamingError(t, e)
  4316  	}
  4317  }
  4318  
// testClientStreamingError starts a server configured to fail early
// (earlyFail) and verifies that a client-streaming RPC surfaces
// codes.NotFound through CloseAndRecv once Send starts failing.
func testClientStreamingError(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security, earlyFail: true})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	stream, err := tc.StreamingInputCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.StreamingInputCallRequest{
		Payload: payload,
	}
	// The 1st request should go through.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	// Keep sending until the server-side failure propagates to Send as
	// io.EOF, then fetch the actual RPC status via CloseAndRecv.
	for {
		if err := stream.Send(req); err != io.EOF {
			continue
		}
		if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound {
			t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound)
		}
		break
	}
}
  4351  
  4352  func (s) TestExceedMaxStreamsLimit(t *testing.T) {
  4353  	for _, e := range listTestEnv() {
  4354  		testExceedMaxStreamsLimit(t, e)
  4355  	}
  4356  }
  4357  
  4358  func testExceedMaxStreamsLimit(t *testing.T, e env) {
  4359  	te := newTest(t, e)
  4360  	te.declareLogNoise(
  4361  		"http2Client.notifyError got notified that the client transport was broken",
  4362  		"Conn.resetTransport failed to create client transport",
  4363  		"grpc: the connection is closing",
  4364  	)
  4365  	te.maxStream = 1 // Only allows 1 live stream per server transport.
  4366  	te.startServer(&testServer{security: e.security})
  4367  	defer te.tearDown()
  4368  
  4369  	cc := te.clientConn()
  4370  	tc := testpb.NewTestServiceClient(cc)
  4371  
  4372  	_, err := tc.StreamingInputCall(te.ctx)
  4373  	if err != nil {
  4374  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
  4375  	}
  4376  	// Loop until receiving the new max stream setting from the server.
  4377  	for {
  4378  		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
  4379  		//goland:noinspection GoDeferInLoop
  4380  		defer cancel()
  4381  		_, err := tc.StreamingInputCall(ctx)
  4382  		if err == nil {
  4383  			time.Sleep(50 * time.Millisecond)
  4384  			continue
  4385  		}
  4386  		if status.Code(err) == codes.DeadlineExceeded {
  4387  			break
  4388  		}
  4389  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
  4390  	}
  4391  }
  4392  
  4393  func (s) TestStreamsQuotaRecovery(t *testing.T) {
  4394  	for _, e := range listTestEnv() {
  4395  		testStreamsQuotaRecovery(t, e)
  4396  	}
  4397  }
  4398  
// testStreamsQuotaRecovery verifies that when the single allowed
// stream is canceled, its quota is released and a new stream can be
// created afterwards.
func testStreamsQuotaRecovery(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise(
		"http2Client.notifyError got notified that the client transport was broken",
		"Conn.resetTransport failed to create client transport",
		"grpc: the connection is closing",
	)
	te.maxStream = 1 // Allows 1 live stream.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Occupy the only available stream slot.
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
	}
	// Loop until the new max stream setting is effective.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		_, err := tc.StreamingInputCall(ctx)
		cancel()
		if err == nil {
			time.Sleep(5 * time.Millisecond)
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// With the quota exhausted, none of these RPCs should complete
	// before their deadlines, even with WaitForReady.
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314)
			if err != nil {
				t.Error(err)
				return
			}
			req := &testpb.SimpleRequest{
				ResponseType: testpb.PayloadType_COMPRESSABLE,
				ResponseSize: 1592,
				Payload:      payload,
			}
			// No rpc should go through due to the max streams limit.
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
			defer cancel()
			if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
				t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
			}
		}()
	}
	wg.Wait()

	cancel()
	// A new stream should be allowed after canceling the first one.
	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
	}
}
  4465  
  4466  func (s) TestCompressServerHasNoSupport(t *testing.T) {
  4467  	for _, e := range listTestEnv() {
  4468  		testCompressServerHasNoSupport(t, e)
  4469  	}
  4470  }
  4471  
  4472  func testCompressServerHasNoSupport(t *testing.T, e env) {
  4473  	te := newTest(t, e)
  4474  	te.serverCompression = false
  4475  	te.clientCompression = false
  4476  	te.clientNopCompression = true
  4477  	te.startServer(&testServer{security: e.security})
  4478  	defer te.tearDown()
  4479  	tc := testpb.NewTestServiceClient(te.clientConn())
  4480  
  4481  	const argSize = 271828
  4482  	const respSize = 314159
  4483  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  4484  	if err != nil {
  4485  		t.Fatal(err)
  4486  	}
  4487  	req := &testpb.SimpleRequest{
  4488  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  4489  		ResponseSize: respSize,
  4490  		Payload:      payload,
  4491  	}
  4492  
  4493  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4494  	defer cancel()
  4495  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented {
  4496  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented)
  4497  	}
  4498  	// Streaming RPC
  4499  	stream, err := tc.FullDuplexCall(ctx)
  4500  	if err != nil {
  4501  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4502  	}
  4503  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented {
  4504  		t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented)
  4505  	}
  4506  }
  4507  
  4508  func (s) TestCompressOK(t *testing.T) {
  4509  	for _, e := range listTestEnv() {
  4510  		testCompressOK(t, e)
  4511  	}
  4512  }
  4513  
// testCompressOK verifies that unary and full-duplex streaming RPCs
// succeed when both the client and server enable compression.
func testCompressOK(t *testing.T, e env) {
	te := newTest(t, e)
	te.serverCompression = true
	te.clientCompression = true
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	// Unary call
	const argSize = 271828
	const respSize = 314159
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: respSize,
		Payload:      payload,
	}
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Streaming RPC
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// Request a single 31415-byte response for a 31415-byte payload.
	respParam := []*testpb.ResponseParameters{
		{
			Size: 31415,
		},
	}
	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	_ = stream.CloseSend()
	// Exactly one reply, then a clean io.EOF.
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
	}
}
  4570  
  4571  func (s) TestIdentityEncoding(t *testing.T) {
  4572  	for _, e := range listTestEnv() {
  4573  		testIdentityEncoding(t, e)
  4574  	}
  4575  }
  4576  
// testIdentityEncoding verifies that unary RPCs and streams using the
// "identity" (no-op) compressor complete successfully.
func testIdentityEncoding(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	// Unary call
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 10,
		Payload:      payload,
	}
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Streaming RPC, explicitly selecting the identity compressor.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity"))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{{Size: 10}},
		Payload:            payload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	_ = stream.CloseSend()
	// Exactly one reply, then a clean io.EOF.
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
	}
}
  4624  
  4625  func (s) TestUnaryClientInterceptor(t *testing.T) {
  4626  	for _, e := range listTestEnv() {
  4627  		testUnaryClientInterceptor(t, e)
  4628  	}
  4629  }
  4630  
  4631  func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
  4632  	err := invoker(ctx, method, req, reply, cc, opts...)
  4633  	if err == nil {
  4634  		return status.Error(codes.NotFound, "")
  4635  	}
  4636  	return err
  4637  }
  4638  
  4639  func testUnaryClientInterceptor(t *testing.T, e env) {
  4640  	te := newTest(t, e)
  4641  	te.userAgent = testAppUA
  4642  	te.unaryClientInt = failOkayRPC
  4643  	te.startServer(&testServer{security: e.security})
  4644  	defer te.tearDown()
  4645  
  4646  	tc := testpb.NewTestServiceClient(te.clientConn())
  4647  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4648  	defer cancel()
  4649  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound {
  4650  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  4651  	}
  4652  }
  4653  
  4654  func (s) TestStreamClientInterceptor(t *testing.T) {
  4655  	for _, e := range listTestEnv() {
  4656  		testStreamClientInterceptor(t, e)
  4657  	}
  4658  }
  4659  
// failOkayStream is a stream client interceptor that inverts success:
// when the underlying streamer succeeds it reports codes.NotFound.
//
// NOTE(review): when the streamer fails, its error is discarded and a
// nil error is returned with the (possibly nil) stream — presumably
// intentional for these tests, but worth confirming.
func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	s, err := streamer(ctx, desc, cc, method, opts...)
	if err == nil {
		return nil, status.Error(codes.NotFound, "")
	}
	return s, nil
}
  4667  
  4668  func testStreamClientInterceptor(t *testing.T, e env) {
  4669  	te := newTest(t, e)
  4670  	te.streamClientInt = failOkayStream
  4671  	te.startServer(&testServer{security: e.security})
  4672  	defer te.tearDown()
  4673  
  4674  	tc := testpb.NewTestServiceClient(te.clientConn())
  4675  	respParam := []*testpb.ResponseParameters{
  4676  		{
  4677  			Size: int32(1),
  4678  		},
  4679  	}
  4680  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  4681  	if err != nil {
  4682  		t.Fatal(err)
  4683  	}
  4684  	req := &testpb.StreamingOutputCallRequest{
  4685  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4686  		ResponseParameters: respParam,
  4687  		Payload:            payload,
  4688  	}
  4689  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4690  	defer cancel()
  4691  	if _, err := tc.StreamingOutputCall(ctx, req); status.Code(err) != codes.NotFound {
  4692  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  4693  	}
  4694  }
  4695  
  4696  func (s) TestUnaryServerInterceptor(t *testing.T) {
  4697  	for _, e := range listTestEnv() {
  4698  		testUnaryServerInterceptor(t, e)
  4699  	}
  4700  }
  4701  
// errInjector is a unary server interceptor that rejects every RPC
// with codes.PermissionDenied without invoking the handler.
//goland:noinspection GoUnusedParameter
func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	return nil, status.Error(codes.PermissionDenied, "")
}
  4706  
  4707  func testUnaryServerInterceptor(t *testing.T, e env) {
  4708  	te := newTest(t, e)
  4709  	te.unaryServerInt = errInjector
  4710  	te.startServer(&testServer{security: e.security})
  4711  	defer te.tearDown()
  4712  
  4713  	tc := testpb.NewTestServiceClient(te.clientConn())
  4714  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4715  	defer cancel()
  4716  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied {
  4717  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  4718  	}
  4719  }
  4720  
  4721  func (s) TestStreamServerInterceptor(t *testing.T) {
  4722  	for _, e := range listTestEnv() {
  4723  		// TODO(bradfitz): Temporarily skip this env due to #619.
  4724  		if e.name == "handler-tls" {
  4725  			continue
  4726  		}
  4727  		testStreamServerInterceptor(t, e)
  4728  	}
  4729  }
  4730  
  4731  func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  4732  	if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" {
  4733  		return handler(srv, ss)
  4734  	}
  4735  	// Reject the other methods.
  4736  	return status.Error(codes.PermissionDenied, "")
  4737  }
  4738  
// testStreamServerInterceptor installs fullDuplexOnly and verifies
// that StreamingOutputCall is rejected with codes.PermissionDenied
// while FullDuplexCall proceeds normally.
func testStreamServerInterceptor(t *testing.T, e env) {
	te := newTest(t, e)
	te.streamServerInt = fullDuplexOnly
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	tc := testpb.NewTestServiceClient(te.clientConn())
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(1),
		},
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            payload,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// StreamingOutputCall is not FullDuplexCall, so the interceptor
	// must reject it; the rejection surfaces on the first Recv.
	s1, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
	}
	// FullDuplexCall is allowed through and must round-trip normally.
	s2, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := s2.Send(req); err != nil {
		t.Fatalf("%v.Send(_) = %v, want <nil>", s2, err)
	}
	if _, err := s2.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", s2, err)
	}
}
  4780  
// funcServer implements methods of TestServiceServer using funcs,
// similar to an http.HandlerFunc.
// Any unimplemented method will crash. Tests implement the method(s)
// they need.
type funcServer struct {
	testpb.TestServiceServer
	// Per-method hooks; calling a method whose hook is nil panics.
	unaryCall          func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
	streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error
	fullDuplexCall     func(stream testpb.TestService_FullDuplexCallServer) error
}
  4791  
// UnaryCall delegates to the test-provided unaryCall hook.
func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return s.unaryCall(ctx, in)
}
  4795  
// StreamingInputCall delegates to the test-provided streamingInputCall hook.
func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
	return s.streamingInputCall(stream)
}
  4799  
// FullDuplexCall delegates to the test-provided fullDuplexCall hook.
func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	return s.fullDuplexCall(stream)
}
  4803  
  4804  func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) {
  4805  	for _, e := range listTestEnv() {
  4806  		testClientRequestBodyErrorUnexpectedEOF(t, e)
  4807  	}
  4808  }
  4809  
// testClientRequestBodyErrorUnexpectedEOF writes a gRPC data frame
// declaring a 5-byte message but sets END_STREAM with no payload
// bytes, and checks the server responds with a frame instead of
// crashing or dispatching the RPC.
func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) {
	te := newTest(t, e)
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		// The truncated request must never reach the handler.
		errUnexpectedCall := errors.New("unexpected call func server method")
		t.Error(errUnexpectedCall)
		return nil, errUnexpectedCall
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// Say we have 5 bytes coming, but set END_STREAM flag:
		st.writeData(1, true, []byte{0, 0, 0, 0, 5})
		st.wantAnyFrame() // wait for server to crash (it used to crash)
	})
}
  4826  
  4827  func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) {
  4828  	for _, e := range listTestEnv() {
  4829  		testClientRequestBodyErrorCloseAfterLength(t, e)
  4830  	}
  4831  }
  4832  
// testClientRequestBodyErrorCloseAfterLength verifies the server tolerates a
// client that announces a message length and then abruptly closes the
// connection. The unary handler must never run.
func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("Server.processUnaryRPC failed to write status")
	// Any invocation of the handler is a failure: the connection was torn
	// down before the request body arrived.
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		errUnexpectedCall := errors.New("unexpected call func server method")
		t.Error(errUnexpectedCall)
		return nil, errUnexpectedCall
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// say we're sending 5 bytes, but then close the connection instead.
		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
		_ = st.cc.Close()
	})
}
  4850  
// TestClientRequestBodyErrorCancel runs the canceled-request-body test against
// every configured test environment.
func (s) TestClientRequestBodyErrorCancel(t *testing.T) {
	for _, e := range listTestEnv() {
		testClientRequestBodyErrorCancel(t, e)
	}
}
  4856  
// testClientRequestBodyErrorCancel verifies the server does not invoke the
// unary handler for a stream the client resets before its body arrives, while
// a later stream on the same connection still reaches the handler.
func testClientRequestBodyErrorCancel(t *testing.T, e env) {
	te := newTest(t, e)
	gotCall := make(chan bool, 1)
	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		gotCall <- true
		return new(testpb.SimpleResponse), nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// Say we have 5 bytes coming, but cancel it instead.
		st.writeRSTStream(1, http2.ErrCodeCancel)
		st.writeData(1, false, []byte{0, 0, 0, 0, 5})

		// Verify we didn't get a call yet.
		select {
		case <-gotCall:
			t.Fatal("unexpected call")
		default:
		}

		// And now send an uncanceled (but still invalid) request, just to get
		// a response.
		st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall", false)
		st.writeData(3, true, []byte{0, 0, 0, 0, 0})
		<-gotCall
		st.wantAnyFrame()
	})
}
  4886  
// TestClientRequestBodyErrorCancelStreamingInput runs the canceled
// streaming-input test against every configured test environment.
func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) {
	for _, e := range listTestEnv() {
		testClientRequestBodyErrorCancelStreamingInput(t, e)
	}
}
  4892  
// testClientRequestBodyErrorCancelStreamingInput verifies that when the client
// resets a streaming-input stream mid-message, the server handler's
// stream.Recv returns a Canceled status.
func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) {
	te := newTest(t, e)
	recvErr := make(chan error, 1)
	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
		_, err := stream.Recv()
		recvErr <- err
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
		// Say we have 5 bytes coming, but cancel it instead.
		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
		st.writeRSTStream(1, http2.ErrCodeCancel)

		var got error
		select {
		case got = <-recvErr:
		case <-time.After(3 * time.Second):
			t.Fatal("timeout waiting for error")
		}
		// grpc.Code is deprecated, use status.Code instead.
		//if grpc.Code(got) != codes.Canceled {
		if status.Code(got) != codes.Canceled {
			t.Errorf("error = %#v; want error code %s", got, codes.Canceled)
		}
	})
}
  4922  
// TestClientInitialHeaderEndStream runs the headers-with-END_STREAM test
// against every configured test environment except the httpHandler ones.
func (s) TestClientInitialHeaderEndStream(t *testing.T) {
	for _, e := range listTestEnv() {
		// Skip the net/http handler transport; this test drives raw HTTP/2
		// frames that only the native server transport handles.
		if e.httpHandler {
			continue
		}
		testClientInitialHeaderEndStream(t, e)
	}
}
  4931  
// testClientInitialHeaderEndStream verifies the server RST_STREAMs a client
// that sends DATA after HEADERS carrying END_STREAM, and that the handler's
// Recv observes a Canceled status rather than data.
func testClientInitialHeaderEndStream(t *testing.T, e env) {
	// To ensure RST_STREAM is sent for illegal data write and not normal stream
	// close.
	frameCheckingDone := make(chan struct{})
	// To ensure goroutine for test does not end before RPC handler performs error
	// checking.
	handlerDone := make(chan struct{})
	te := newTest(t, e)
	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
		defer close(handlerDone)
		// Block on serverTester receiving RST_STREAM. This ensures server has closed
		// stream before stream.Recv().
		<-frameCheckingDone
		data, err := stream.Recv()
		if err == nil {
			t.Errorf("unexpected data received in func server method: '%v'", data)
		} else if status.Code(err) != codes.Canceled {
			t.Errorf("expected canceled error, instead received '%v'", err)
		}
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		// Send a headers with END_STREAM flag, but then write data.
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", true)
		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
		st.wantAnyFrame()
		st.wantAnyFrame()
		st.wantRSTStream(http2.ErrCodeStreamClosed)
		close(frameCheckingDone)
		<-handlerDone
	})
}
  4966  
// TestClientSendDataAfterCloseSend runs the data-after-close-send test against
// every configured test environment except the httpHandler ones.
func (s) TestClientSendDataAfterCloseSend(t *testing.T) {
	for _, e := range listTestEnv() {
		// Skip the net/http handler transport; this test drives raw HTTP/2
		// frames that only the native server transport handles.
		if e.httpHandler {
			continue
		}
		testClientSendDataAfterCloseSend(t, e)
	}
}
  4975  
// testClientSendDataAfterCloseSend verifies the server RST_STREAMs a client
// that writes DATA after a DATA frame carrying END_STREAM, that the handler's
// Recv drains cleanly (EOF or Canceled), and that a subsequent SendMsg on the
// closed stream fails with an Internal status.
func testClientSendDataAfterCloseSend(t *testing.T, e env) {
	// To ensure RST_STREAM is sent for illegal data write prior to execution of RPC
	// handler.
	frameCheckingDone := make(chan struct{})
	// To ensure goroutine for test does not end before RPC handler performs error
	// checking.
	handlerDone := make(chan struct{})
	te := newTest(t, e)
	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
		defer close(handlerDone)
		// Block on serverTester receiving RST_STREAM. This ensures server has closed
		// stream before stream.Recv().
		<-frameCheckingDone
		for {
			_, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				if status.Code(err) != codes.Canceled {
					t.Errorf("expected canceled error, instead received '%v'", err)
				}
				break
			}
		}
		// The stream is closed; sending must fail with an Internal status.
		if err := stream.SendMsg(nil); err == nil {
			t.Error("expected error sending message on stream after stream closed due to illegal data")
		} else if status.Code(err) != codes.Internal {
			t.Errorf("expected internal error, instead received '%v'", err)
		}
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
		// Send data with END_STREAM flag, but then write more data.
		st.writeData(1, true, []byte{0, 0, 0, 0, 0})
		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
		st.wantAnyFrame()
		st.wantAnyFrame()
		st.wantRSTStream(http2.ErrCodeStreamClosed)
		close(frameCheckingDone)
		<-handlerDone
	})
}
  5022  
// TestClientResourceExhaustedCancelFullDuplex runs the resource-exhausted
// cancellation test against every configured non-httpHandler environment.
func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.httpHandler {
			// httpHandler write won't be blocked on flow control window.
			continue
		}
		testClientResourceExhaustedCancelFullDuplex(t, e)
	}
}
  5032  
// testClientResourceExhaustedCancelFullDuplex verifies that when the client
// fails a full-duplex stream with ResourceExhausted (response exceeds its
// receive limit), the server handler's blocked Send loop is unblocked with a
// Canceled error.
func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
	te := newTest(t, e)
	recvErr := make(chan error, 1)
	ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
		defer close(recvErr)
		_, err := stream.Recv()
		if err != nil {
			return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want <nil>", err)
		}
		// Build a 10-byte payload; wrapped in a response message it exceeds
		// the 10-byte maxClientReceiveMsgSize configured below, so the client
		// rejects it with ResourceExhausted.
		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10)
		if err != nil {
			return err
		}
		resp := &testpb.StreamingOutputCallResponse{
			Payload: payload,
		}
		ce := make(chan error, 1)
		// Keep sending until the stream breaks; the client-side failure must
		// eventually make Send return an error.
		go func() {
			var err error
			for {
				if err = stream.Send(resp); err != nil {
					break
				}
			}
			ce <- err
		}()
		select {
		case err = <-ce:
		case <-time.After(10 * time.Second):
			err = errors.New("10s timeout reached")
		}
		recvErr <- err
		return err
	}}
	te.startServer(ts)
	defer te.tearDown()
	// set a low limit on receive message size to error with Resource Exhausted on
	// client side when server send a large message.
	te.maxClientReceiveMsgSize = newInt(10)
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	req := &testpb.StreamingOutputCallRequest{}
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	err = <-recvErr
	if status.Code(err) != codes.Canceled {
		t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled)
	}
}
  5094  
// clientFailCreds implements credentials.TransportCredentials with a
// client-side handshake that always fails with a fatal error, while the
// server-side handshake is a passthrough.
type clientFailCreds struct{}

// ServerHandshake accepts the raw connection unchanged with no auth info.
func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// ClientHandshake always fails, simulating a fatal handshake error.
//goland:noinspection GoUnusedParameter
func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
}

// Info returns an empty ProtocolInfo.
func (c *clientFailCreds) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}

// Clone returns the receiver; the type is stateless.
func (c *clientFailCreds) Clone() credentials.TransportCredentials {
	return c
}

// OverrideServerName is a no-op.
//goland:noinspection GoUnusedParameter
func (c *clientFailCreds) OverrideServerName(s string) error {
	return nil
}
  5116  
// This test makes sure that failfast RPCs fail if client handshake fails with
// fatal errors: the RPC must return Unavailable rather than block until its
// one-second deadline.
func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
	// Listen but never serve; the handshake fails client-side before any
	// bytes would be exchanged.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer func(lis net.Listener) {
		_ = lis.Close()
	}(lis)

	cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
	if err != nil {
		t.Fatalf("grpc.Dial(_) = %v", err)
	}
	defer func(cc *grpc.ClientConn) {
		_ = cc.Close()
	}(cc)

	tc := testpb.NewTestServiceClient(cc)
	// This unary call should fail, but not timeout.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
	}
}
  5144  
// TestFlowControlLogicalRace hammers a streaming RPC with many short-deadline
// requests against a server that floods large responses, checking that each
// request still receives at least recvCount responses.
func (s) TestFlowControlLogicalRace(t *testing.T) {
	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
	// and other flow control bugs.

	const (
		itemCount   = 100
		itemSize    = 1 << 10
		recvCount   = 2
		maxFailures = 3

		requestTimeout = time.Second * 5
	)

	requestCount := 10000
	if raceMode {
		// The race detector slows everything down; do fewer iterations.
		requestCount = 1000
	}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer func(lis net.Listener) {
		_ = lis.Close()
	}(lis)

	s := grpc.NewServer()
	testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{
		itemCount: itemCount,
		itemSize:  itemSize,
	})
	defer s.Stop()

	go func() {
		_ = s.Serve(lis)
	}()

	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
	//cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
	}
	defer func(cc *grpc.ClientConn) {
		_ = cc.Close()
	}(cc)
	cl := testpb.NewTestServiceClient(cc)

	failures := 0
	for i := 0; i < requestCount; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
		if err != nil {
			t.Fatalf("StreamingOutputCall; err = %q", err)
		}

		// Receive up to recvCount responses; EOF or a deadline exit the loop
		// early (and count as a failure below).
		j := 0
	loop:
		for ; j < recvCount; j++ {
			_, err := output.Recv()
			if err != nil {
				if err == io.EOF {
					break loop
				}
				switch status.Code(err) {
				case codes.DeadlineExceeded:
					break loop
				default:
					t.Fatalf("Recv; err = %q", err)
				}
			}
		}
		cancel()
		<-ctx.Done()

		if j < recvCount {
			t.Errorf("got %d responses to request %d", j, i)
			failures++
			if failures >= maxFailures {
				// Continue past the first failure to see if the connection is
				// entirely broken, or if only a single RPC was affected
				break
			}
		}
	}
}
  5231  
  5232  type flowControlLogicalRaceServer struct {
  5233  	testpb.TestServiceServer
  5234  
  5235  	itemSize  int
  5236  	itemCount int
  5237  }
  5238  
  5239  //goland:noinspection GoUnusedParameter
  5240  func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error {
  5241  	for i := 0; i < s.itemCount; i++ {
  5242  		err := srv.Send(&testpb.StreamingOutputCallResponse{
  5243  			Payload: &testpb.Payload{
  5244  				// Sending a large stream of data which the client reject
  5245  				// helps to trigger some types of flow control bugs.
  5246  				//
  5247  				// Reallocating memory here is inefficient, but the stress it
  5248  				// puts on the GC leads to more frequent flow control
  5249  				// failures. The GC likely causes more variety in the
  5250  				// goroutine scheduling orders.
  5251  				Body: bytes.Repeat([]byte("a"), s.itemSize),
  5252  			},
  5253  		})
  5254  		if err != nil {
  5255  			return err
  5256  		}
  5257  	}
  5258  	return nil
  5259  }
  5260  
  5261  type lockingWriter struct {
  5262  	mu sync.Mutex
  5263  	w  io.Writer
  5264  }
  5265  
  5266  func (lw *lockingWriter) Write(p []byte) (n int, err error) {
  5267  	lw.mu.Lock()
  5268  	defer lw.mu.Unlock()
  5269  	return lw.w.Write(p)
  5270  }
  5271  
  5272  func (lw *lockingWriter) setWriter(w io.Writer) {
  5273  	lw.mu.Lock()
  5274  	defer lw.mu.Unlock()
  5275  	lw.w = w
  5276  }
  5277  
  5278  var testLogOutput = &lockingWriter{w: os.Stderr}
  5279  
// awaitNewConnLogOutput waits (up to 50ms) for the connection-closing log
// spam to appear in the captured output, so the log filter installed by
// declareLogNoise is still active when the spamming goroutine runs.
// Otherwise the "defer restore()" at the top of various test functions
// restores our log filter and then the goroutine spams.
func awaitNewConnLogOutput() {
	awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry")
}
  5288  
  5289  func awaitLogOutput(maxWait time.Duration, phrase string) {
  5290  	pb := []byte(phrase)
  5291  
  5292  	timer := time.NewTimer(maxWait)
  5293  	defer timer.Stop()
  5294  	wakeup := make(chan bool, 1)
  5295  	for {
  5296  		if logOutputHasContents(pb, wakeup) {
  5297  			return
  5298  		}
  5299  		select {
  5300  		case <-timer.C:
  5301  			// Too slow. Oh well.
  5302  			return
  5303  		case <-wakeup:
  5304  		}
  5305  	}
  5306  }
  5307  
// logOutputHasContents reports whether the captured log output contains v.
// When it does not (and a filterWriter is installed), wakeup is registered so
// the next write to the filterWriter signals the caller to re-check.
func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
	// Lock order: testLogOutput.mu before the inner filterWriter.mu.
	testLogOutput.mu.Lock()
	defer testLogOutput.mu.Unlock()
	fw, ok := testLogOutput.w.(*filterWriter)
	if !ok {
		// No filterWriter installed, so nothing is being captured.
		return false
	}
	fw.mu.Lock()
	defer fw.mu.Unlock()
	if bytes.Contains(fw.buf.Bytes(), v) {
		return true
	}
	fw.wakeup = wakeup
	return false
}
  5323  
// verboseLogs disables log-noise filtering in declareLogNoise when set.
var verboseLogs = flag.Bool("verbose_logs", false, "show all log output, without filtering")
  5325  
// noop is the do-nothing restore function returned by declareLogNoise when
// filtering is disabled via -verbose_logs.
func noop() {}
  5327  
// declareLogNoise declares that t is expected to emit the following noisy
// phrases, even on success. Those phrases will be filtered from log output and
// only be shown if *verbose_logs or t ends up failing. The returned restore
// function should be called with defer to be run before the test ends.
func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
	if *verboseLogs {
		// No filtering requested; nothing to install or restore.
		return noop
	}
	fw := &filterWriter{dst: os.Stderr, filter: phrases}
	testLogOutput.setWriter(fw)
	return func() {
		// t.Failed() is evaluated when the restore closure runs (typically
		// via defer at test end), not when declareLogNoise is called.
		if t.Failed() {
			fw.mu.Lock()
			defer fw.mu.Unlock()
			if fw.buf.Len() > 0 {
				// On failure, dump everything captured, including the
				// filtered phrases.
				t.Logf("Complete log output:\n%s", fw.buf.Bytes())
			}
		}
		testLogOutput.setWriter(os.Stderr)
	}
}
  5349  
  5350  type filterWriter struct {
  5351  	dst    io.Writer
  5352  	filter []string
  5353  
  5354  	mu     sync.Mutex
  5355  	buf    bytes.Buffer
  5356  	wakeup chan<- bool // if non-nil, gets true on write
  5357  }
  5358  
  5359  func (fw *filterWriter) Write(p []byte) (n int, err error) {
  5360  	fw.mu.Lock()
  5361  	fw.buf.Write(p)
  5362  	if fw.wakeup != nil {
  5363  		select {
  5364  		case fw.wakeup <- true:
  5365  		default:
  5366  		}
  5367  	}
  5368  	fw.mu.Unlock()
  5369  
  5370  	ps := string(p)
  5371  	for _, f := range fw.filter {
  5372  		if strings.Contains(ps, f) {
  5373  			return len(p), nil
  5374  		}
  5375  	}
  5376  	return fw.dst.Write(p)
  5377  }
  5378  
// TestGRPCMethod verifies that grpc.Method in a handler's context returns the
// full method name of the RPC being served.
func (s) TestGRPCMethod(t *testing.T) {
	var method string
	var ok bool

	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			// Capture what the server sees; checked after the RPC completes.
			method, ok = grpc.Method(ctx)
			return &testpb.Empty{}, nil
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
	}

	if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want {
		t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want)
	}
}
  5405  
// renameProtoCodec is an encoding.Codec wrapper that allows customizing the
// Name() of another codec.
type renameProtoCodec struct {
	encoding.Codec
	name string
}

// Name returns the overridden codec name instead of the embedded codec's.
func (r *renameProtoCodec) Name() string { return r.name }
  5414  
// TestForceCodecName confirms that the ForceCodec call option sets the subtype
// in the content-type header according to the Name() of the codec provided.
func (s) TestForceCodecName(t *testing.T) {
	// The server pulls the expected content-type for each RPC from this
	// channel, so each call below enqueues its expectation first.
	wantContentTypeCh := make(chan []string, 1)
	defer close(wantContentTypeCh)

	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			md, ok := metadata.FromIncomingContext(ctx)
			if !ok {
				return nil, status.Errorf(codes.Internal, "no metadata in context")
			}
			if got, want := md["content-type"], <-wantContentTypeCh; !reflect.DeepEqual(got, want) {
				return nil, status.Errorf(codes.Internal, "got content-type=%q; want [%q]", got, want)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(encoding.GetCodec("proto"))}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	codec := &renameProtoCodec{Codec: encoding.GetCodec("proto"), name: "some-test-name"}
	wantContentTypeCh <- []string{"application/grpc+some-test-name"}
	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
	}

	// Confirm the name is converted to lowercase before transmitting.
	codec.name = "aNoTHeRNaME"
	wantContentTypeCh <- []string{"application/grpc+anothername"}
	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
	}
}
  5454  
// TestForceServerCodec verifies that a codec installed via ForceServerCodec is
// actually used by the server: one RPC must produce exactly one Marshal and
// one Unmarshal on the counting codec.
func (s) TestForceServerCodec(t *testing.T) {
	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			return &testpb.Empty{}, nil
		},
	}
	codec := &countingProtoCodec{}
	if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(codec)}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
	}

	unmarshalCount := atomic.LoadInt32(&codec.unmarshalCount)
	const wantUnmarshalCount = 1
	if unmarshalCount != wantUnmarshalCount {
		t.Fatalf("protoCodec.unmarshalCount = %d; want %d", unmarshalCount, wantUnmarshalCount)
	}
	marshalCount := atomic.LoadInt32(&codec.marshalCount)
	const wantMarshalCount = 1
	if marshalCount != wantMarshalCount {
		t.Fatalf("protoCodec.marshalCount = %d; want %d", marshalCount, wantMarshalCount)
	}
}
  5485  
// TestUnaryProxyDoesNotForwardMetadata verifies that incoming metadata is not
// implicitly forwarded when a server handler makes an outgoing unary RPC with
// the same context.
func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
	const mdkey = "somedata"

	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
	endpoint := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
				return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
	// without explicitly copying the metadata.
	proxy := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
				return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
			}
			return endpoint.Client.EmptyCall(ctx, in)
		},
	}
	if err := proxy.Start(nil); err != nil {
		t.Fatalf("Error starting proxy server: %v", err)
	}
	defer proxy.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	md := metadata.Pairs(mdkey, "val")
	ctx = metadata.NewOutgoingContext(ctx, md)

	// Sanity check that endpoint properly errors when it sees mdkey.
	_, err := endpoint.Client.EmptyCall(ctx, &testpb.Empty{})
	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
		t.Fatalf("endpoint.Client.EmptyCall(_, _) = _, %v; want _, <status with Code()=Internal>", err)
	}

	if _, err := proxy.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatal(err.Error())
	}
}
  5533  
  5534  func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
  5535  	const mdkey = "somedata"
  5536  
  5537  	// doFDC performs a FullDuplexCall with client and returns the error from the
  5538  	// first stream.Recv call, or nil if that error is io.EOF.  Calls t.Fatal if
  5539  	// the stream cannot be established.
  5540  	doFDC := func(ctx context.Context, client testpb.TestServiceClient) error {
  5541  		stream, err := client.FullDuplexCall(ctx)
  5542  		if err != nil {
  5543  			t.Fatalf("Unwanted error: %v", err)
  5544  		}
  5545  		if _, err := stream.Recv(); err != io.EOF {
  5546  			return err
  5547  		}
  5548  		return nil
  5549  	}
  5550  
  5551  	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
  5552  	endpoint := &stubserver.StubServer{
  5553  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  5554  			ctx := stream.Context()
  5555  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
  5556  				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
  5557  			}
  5558  			return nil
  5559  		},
  5560  	}
  5561  	if err := endpoint.Start(nil); err != nil {
  5562  		t.Fatalf("Error starting endpoint server: %v", err)
  5563  	}
  5564  	defer endpoint.Stop()
  5565  
  5566  	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
  5567  	// without explicitly copying the metadata.
  5568  	proxy := &stubserver.StubServer{
  5569  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  5570  			ctx := stream.Context()
  5571  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
  5572  				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
  5573  			}
  5574  			return doFDC(ctx, endpoint.Client)
  5575  		},
  5576  	}
  5577  	if err := proxy.Start(nil); err != nil {
  5578  		t.Fatalf("Error starting proxy server: %v", err)
  5579  	}
  5580  	defer proxy.Stop()
  5581  
  5582  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  5583  	defer cancel()
  5584  	md := metadata.Pairs(mdkey, "val")
  5585  	ctx = metadata.NewOutgoingContext(ctx, md)
  5586  
  5587  	// Sanity check that endpoint properly errors when it sees mdkey in ctx.
  5588  	err := doFDC(ctx, endpoint.Client)
  5589  	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
  5590  		t.Fatalf("stream.Recv() = _, %v; want _, <status with Code()=Internal>", err)
  5591  	}
  5592  
  5593  	if err := doFDC(ctx, proxy.Client); err != nil {
  5594  		t.Fatalf("doFDC(_, proxy.Client) = %v; want nil", err)
  5595  	}
  5596  }
  5597  
// TestStatsTagsAndTrace verifies that stats tags and trace bytes attached to a
// client context are transmitted in grpc-tags-bin/grpc-trace-bin metadata and
// are visible via stats.Tags/stats.Trace on the server side.
func (s) TestStatsTagsAndTrace(t *testing.T) {
	// Data added to context by client (typically in a stats handler).
	tags := []byte{1, 5, 2, 4, 3}
	trace := []byte{5, 2, 1, 3, 4}

	// endpoint ensures Tags() and Trace() in context match those that were added
	// by the client and returns an error if not.
	endpoint := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			md, _ := metadata.FromIncomingContext(ctx)
			if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) {
				return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags)
			}
			if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) {
				return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags)
			}
			if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) {
				return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace)
			}
			if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) {
				return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	testCases := []struct {
		ctx  context.Context
		want codes.Code
	}{
		// Neither tags nor trace attached.
		{ctx: ctx, want: codes.Internal},
		// Tags attached but trace missing.
		{ctx: stats.SetTags(ctx, tags), want: codes.Internal},
		// Trace attached but tags missing.
		{ctx: stats.SetTrace(ctx, trace), want: codes.Internal},
		// Trace deliberately set to the tags bytes, so it won't match.
		{ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal},
		// Both tags and trace set correctly.
		{ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK},
	}

	for _, tc := range testCases {
		_, err := endpoint.Client.EmptyCall(tc.ctx, &testpb.Empty{})
		if tc.want == codes.OK && err != nil {
			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err)
		}
		if s, ok := status.FromError(err); !ok || s.Code() != tc.want {
			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, <status with Code()=%v>", tc.ctx, err, tc.want)
		}
	}
}
  5652  
  5653  func (s) TestTapTimeout(t *testing.T) {
  5654  	sopts := []grpc.ServerOption{
  5655  		grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) {
  5656  			c, cancel := context.WithCancel(ctx)
  5657  			// Call cancel instead of setting a deadline so we can detect which error
  5658  			// occurred -- this cancellation (desired) or the client's deadline
  5659  			// expired (indicating this cancellation did not affect the RPC).
  5660  			time.AfterFunc(10*time.Millisecond, cancel)
  5661  			return c, nil
  5662  		}),
  5663  	}
  5664  
  5665  	ss := &stubserver.StubServer{
  5666  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5667  			<-ctx.Done()
  5668  			return nil, status.Errorf(codes.Canceled, ctx.Err().Error())
  5669  		},
  5670  	}
  5671  	if err := ss.Start(sopts); err != nil {
  5672  		t.Fatalf("Error starting endpoint server: %v", err)
  5673  	}
  5674  	defer ss.Stop()
  5675  
  5676  	// This was known to be flaky; test several times.
  5677  	for i := 0; i < 10; i++ {
  5678  		// Set our own deadline in case the server hangs.
  5679  		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  5680  		res, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
  5681  		cancel()
  5682  		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
  5683  			t.Fatalf("ss.Client.EmptyCall(ctx, _) = %v, %v; want nil, <status with Code()=Canceled>", res, err)
  5684  		}
  5685  	}
  5686  
  5687  }
  5688  
  5689  func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
  5690  	ss := &stubserver.StubServer{
  5691  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  5692  			return status.Errorf(codes.Internal, "")
  5693  		},
  5694  	}
  5695  	var sopts []grpc.ServerOption
  5696  	if err := ss.Start(sopts); err != nil {
  5697  		t.Fatalf("Error starting endpoint server: %v", err)
  5698  	}
  5699  	defer ss.Stop()
  5700  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  5701  	defer cancel()
  5702  	stream, err := ss.Client.FullDuplexCall(ctx)
  5703  	if err != nil {
  5704  		t.Fatalf("Error while creating stream: %v", err)
  5705  	}
  5706  	for {
  5707  		if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil {
  5708  			time.Sleep(5 * time.Millisecond)
  5709  		} else if err == io.EOF {
  5710  			break // Success.
  5711  		} else {
  5712  			t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
  5713  		}
  5714  	}
  5715  }
  5716  
// windowSizeConfig bundles the initial HTTP/2 flow-control window sizes
// applied to each side of a test connection (see testConfigurableWindowSize).
type windowSizeConfig struct {
	serverStream int32 // server's initial per-stream window
	serverConn   int32 // server's initial per-connection window
	clientStream int32 // client's initial per-stream window
	clientConn   int32 // client's initial per-connection window
}
  5723  
  5724  func max(a, b int32) int32 {
  5725  	if a > b {
  5726  		return a
  5727  	}
  5728  	return b
  5729  }
  5730  
  5731  func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) {
  5732  	wc := windowSizeConfig{
  5733  		serverStream: 8 * 1024 * 1024,
  5734  		serverConn:   12 * 1024 * 1024,
  5735  		clientStream: 6 * 1024 * 1024,
  5736  		clientConn:   8 * 1024 * 1024,
  5737  	}
  5738  	for _, e := range listTestEnv() {
  5739  		testConfigurableWindowSize(t, e, wc)
  5740  	}
  5741  }
  5742  
  5743  func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) {
  5744  	wc := windowSizeConfig{
  5745  		serverStream: 1,
  5746  		serverConn:   1,
  5747  		clientStream: 1,
  5748  		clientConn:   1,
  5749  	}
  5750  	for _, e := range listTestEnv() {
  5751  		testConfigurableWindowSize(t, e, wc)
  5752  	}
  5753  }
  5754  
// testConfigurableWindowSize configures the given initial HTTP/2 window sizes
// on both client and server, then drives enough full-duplex traffic to exceed
// the largest configured window, verifying that flow control still makes
// progress (no deadlock, no send/recv errors).
func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) {
	te := newTest(t, e)
	te.serverInitialWindowSize = wc.serverStream
	te.serverInitialConnWindowSize = wc.serverConn
	te.clientInitialWindowSize = wc.clientStream
	te.clientInitialConnWindowSize = wc.clientConn

	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	numOfIter := 11
	// Set message size to exhaust largest of window sizes.
	messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1)
	messageSize = max(messageSize, 64*1024)
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize)
	if err != nil {
		t.Fatal(err)
	}
	respParams := []*testpb.ResponseParameters{
		{
			Size: messageSize,
		},
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParams,
		Payload:            payload,
	}
	// Alternate Send and Recv so neither direction's window can be exhausted
	// without the peer draining it.
	for i := 0; i < numOfIter; i++ {
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		if _, err := stream.Recv(); err != nil {
			t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
		}
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
	}
}
  5803  
  5804  func (s) TestWaitForReadyConnection(t *testing.T) {
  5805  	for _, e := range listTestEnv() {
  5806  		testWaitForReadyConnection(t, e)
  5807  	}
  5808  
  5809  }
  5810  
// testWaitForReadyConnection dials non-blockingly, polls the connectivity
// state until the connection reaches Ready (or the 1s budget expires), and
// then issues a fail-fast RPC that must succeed on the ready connection.
func testWaitForReadyConnection(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn() // Non-blocking dial.
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	state := cc.GetState()
	// Wait for connection to be Ready.
	// The loop exits either when Ready is reached or when
	// WaitForStateChange returns false (ctx expired).
	for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
	}
	if state != connectivity.Ready {
		t.Fatalf("Want connection state to be Ready, got %v", state)
	}
	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Make a fail-fast RPC.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err)
	}
}
  5835  
  5836  type errCodec struct {
  5837  	noError bool
  5838  }
  5839  
  5840  //goland:noinspection GoUnusedParameter
  5841  func (c *errCodec) Marshal(v interface{}) ([]byte, error) {
  5842  	if c.noError {
  5843  		return []byte{}, nil
  5844  	}
  5845  	return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12")
  5846  }
  5847  
  5848  //goland:noinspection GoUnusedParameter
  5849  func (c *errCodec) Unmarshal(data []byte, v interface{}) error {
  5850  	return nil
  5851  }
  5852  
  5853  func (c *errCodec) Name() string {
  5854  	return "Fermat's near-miss."
  5855  }
  5856  
  5857  type countingProtoCodec struct {
  5858  	marshalCount   int32
  5859  	unmarshalCount int32
  5860  }
  5861  
  5862  func (p *countingProtoCodec) Marshal(v interface{}) ([]byte, error) {
  5863  	atomic.AddInt32(&p.marshalCount, 1)
  5864  	vv, ok := v.(proto.Message)
  5865  	if !ok {
  5866  		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
  5867  	}
  5868  	return proto.Marshal(vv)
  5869  }
  5870  
  5871  func (p *countingProtoCodec) Unmarshal(data []byte, v interface{}) error {
  5872  	atomic.AddInt32(&p.unmarshalCount, 1)
  5873  	vv, ok := v.(proto.Message)
  5874  	if !ok {
  5875  		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
  5876  	}
  5877  	return proto.Unmarshal(data, vv)
  5878  }
  5879  
  5880  func (*countingProtoCodec) Name() string {
  5881  	return "proto"
  5882  }
  5883  
  5884  func (s) TestEncodeDoesntPanic(t *testing.T) {
  5885  	for _, e := range listTestEnv() {
  5886  		testEncodeDoesntPanic(t, e)
  5887  	}
  5888  }
  5889  
// testEncodeDoesntPanic installs the failing errCodec on the server only and
// checks that a marshal failure surfaces as an RPC error rather than a panic,
// and that flipping the codec to its passing mode lets the RPC succeed.
func testEncodeDoesntPanic(t *testing.T, e env) {
	te := newTest(t, e)
	erc := &errCodec{}
	te.customCodec = erc
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	// Clear the codec before the client connection is created below, so only
	// the server (started above) uses errCodec.
	te.customCodec = nil
	tc := testpb.NewTestServiceClient(te.clientConn())
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Failure case, should not panic.
	_, _ = tc.EmptyCall(ctx, &testpb.Empty{})
	erc.noError = true
	// Passing case.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
}
  5908  
  5909  func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) {
  5910  	for _, e := range listTestEnv() {
  5911  		testSvrWriteStatusEarlyWrite(t, e)
  5912  	}
  5913  }
  5914  
// testSvrWriteStatusEarlyWrite caps the server's receive and send message
// sizes at largeSize and verifies that both an oversized inbound request and
// an oversized outbound response cause the stream to fail with
// ResourceExhausted on the client's Recv.
func testSvrWriteStatusEarlyWrite(t *testing.T, e env) {
	const smallSize = 1024
	const largeSize = 2048
	const extraLargeSize = 4096
	te := newTest(t, e)
	te.maxServerReceiveMsgSize = newInt(largeSize)
	te.maxServerSendMsgSize = newInt(largeSize)
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
	if err != nil {
		t.Fatal(err)
	}
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(smallSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            extraLargePayload,
	}
	// Test recv case: server receives a message larger than maxServerReceiveMsgSize.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send() = _, %v, want <nil>", stream, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	// Test send case: server sends a message larger than maxServerSendMsgSize.
	// Note: sreq and respParam are mutated in place and reused for the
	// second stream.
	sreq.Payload = smallPayload
	respParam[0].Size = int32(extraLargeSize)

	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err = stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  5969  
// The following functions with function name ending with TD indicates that they
// should be deleted after old service config API is deprecated and deleted.

// testServiceConfigSetupTD builds a test fixture whose service config is fed
// through a buffered channel (the old, channel-based service config API).
// Callers send a grpc.ServiceConfig on the returned channel before (or after)
// dialing; the buffer of 1 lets the write happen before any read.
func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) {
	te := newTest(t, e)
	// We write before read.
	ch := make(chan grpc.ServiceConfig, 1)
	te.sc = ch
	te.userAgent = testAppUA
	// Suppress known-noisy transport teardown log lines.
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	return te, ch
}
  5986  
  5987  func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) {
  5988  	for _, e := range listTestEnv() {
  5989  		testGetMethodConfigTD(t, e)
  5990  	}
  5991  }
  5992  
// testGetMethodConfigTD verifies method config lookup precedence under the
// old channel-based service config API: an exact method entry
// ("/Service/EmptyCall") takes priority over the service-wide default
// ("/Service/"), and removing the exact entry makes RPCs fall back to the
// default.
func testGetMethodConfigTD(t *testing.T, e env) {
	te, ch := testServiceConfigSetupTD(t, e)
	defer te.tearDown()

	mc1 := grpc.MethodConfig{
		WaitForReady: newBool(true),
		Timeout:      newDuration(time.Millisecond),
	}
	mc2 := grpc.MethodConfig{WaitForReady: newBool(false)}
	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc1
	m["/grpc.testing.TestService/"] = mc2
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	// No server is started, so RPC outcomes depend entirely on wait-for-ready
	// and timeout settings from the method config.
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	m = make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/UnaryCall"] = mc1
	m["/grpc.testing.TestService/"] = mc2
	sc = grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc
	// Wait for the new service config to propagate.
	for {
		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
			break
		}
	}
	// The following RPCs are expected to become fail-fast.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
  6037  
  6038  func (s) TestServiceConfigWaitForReadyTD(t *testing.T) {
  6039  	for _, e := range listTestEnv() {
  6040  		testServiceConfigWaitForReadyTD(t, e)
  6041  	}
  6042  }
  6043  
// testServiceConfigWaitForReadyTD checks the interaction between the per-call
// WaitForReady option and the service config's wait_for_ready field under the
// old channel-based API: the per-call setting wins when present, and the
// service config value applies when the call does not set one.
func testServiceConfigWaitForReadyTD(t *testing.T, e env) {
	te, ch := testServiceConfigSetupTD(t, e)
	defer te.tearDown()

	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
	mc := grpc.MethodConfig{
		WaitForReady: newBool(false),
		Timeout:      newDuration(time.Millisecond),
	}
	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}

	// Generate a service config update.
	// Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
	mc.WaitForReady = newBool(true)
	m = make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc = grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	// Wait for the new service config to take effect.
	// Poll GetMethodConfig until the updated WaitForReady value is observed.
	mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
	for {
		if !*mc.WaitForReady {
			time.Sleep(100 * time.Millisecond)
			mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
			continue
		}
		break
	}
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
  6102  
  6103  func (s) TestServiceConfigTimeoutTD(t *testing.T) {
  6104  	for _, e := range listTestEnv() {
  6105  		testServiceConfigTimeoutTD(t, e)
  6106  	}
  6107  }
  6108  
// testServiceConfigTimeoutTD checks that the effective RPC timeout is the
// minimum of the caller's context deadline and the service config's per-method
// timeout, under the old channel-based service config API.
func testServiceConfigTimeoutTD(t *testing.T, e env) {
	te, ch := testServiceConfigSetupTD(t, e)
	defer te.tearDown()

	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	mc := grpc.MethodConfig{
		Timeout: newDuration(time.Hour),
	}
	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()
	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()

	// Generate a service config update.
	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	mc.Timeout = newDuration(time.Nanosecond)
	m = make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/EmptyCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc = grpc.ServiceConfig{
		Methods: m,
	}
	ch <- sc

	// Wait for the new service config to take effect.
	// Poll GetMethodConfig until the 1ns timeout is observed.
	mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
	for {
		if *mc.Timeout != time.Nanosecond {
			time.Sleep(100 * time.Millisecond)
			mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
			continue
		}
		break
	}

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()
}
  6173  
  6174  func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) {
  6175  	for _, e := range listTestEnv() {
  6176  		testServiceConfigMaxMsgSizeTD(t, e)
  6177  	}
  6178  }
  6179  
// testServiceConfigMaxMsgSizeTD verifies that message size limits from the
// (old, channel-based) service config combine with the client API's limits:
// Case1 uses only the service config's 2048-byte caps, Case2 lets a smaller
// client-side cap (1024) win, and Case3 shows a larger client-side cap (4096)
// does not loosen the service config's 2048 limit. Note that req, sreq and
// respParam are mutated in place between sub-cases.
func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) {
	// Setting up values and objects shared across all test cases.
	const smallSize = 1
	const largeSize = 1024
	const extraLargeSize = 2048

	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
	if err != nil {
		t.Fatal(err)
	}

	mc := grpc.MethodConfig{
		MaxReqSize:  newInt(extraLargeSize),
		MaxRespSize: newInt(extraLargeSize),
	}

	m := make(map[string]grpc.MethodConfig)
	m["/grpc.testing.TestService/UnaryCall"] = mc
	m["/grpc.testing.TestService/FullDuplexCall"] = mc
	sc := grpc.ServiceConfig{
		Methods: m,
	}
	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te1, ch1 := testServiceConfigSetupTD(t, e)
	te1.startServer(&testServer{security: e.security})
	defer te1.tearDown()

	ch1 <- sc
	tc := testpb.NewTestServiceClient(te1.clientConn())

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(extraLargeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC recv.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = extraLargePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(extraLargeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}
	stream, err := tc.FullDuplexCall(te1.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = extraLargePayload
	stream, err = tc.FullDuplexCall(te1.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}

	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te2, ch2 := testServiceConfigSetupTD(t, e)
	te2.maxClientReceiveMsgSize = newInt(1024)
	te2.maxClientSendMsgSize = newInt(1024)
	te2.startServer(&testServer{security: e.security})
	defer te2.tearDown()
	ch2 <- sc
	tc = testpb.NewTestServiceClient(te2.clientConn())

	// Test for unary RPC recv.
	req.Payload = smallPayload
	req.ResponseSize = int32(largeSize)

	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	stream, err = tc.FullDuplexCall(te2.ctx)
	respParam[0].Size = int32(largeSize)
	sreq.Payload = smallPayload
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te2.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}

	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
	te3, ch3 := testServiceConfigSetupTD(t, e)
	te3.maxClientReceiveMsgSize = newInt(4096)
	te3.maxClientSendMsgSize = newInt(4096)
	te3.startServer(&testServer{security: e.security})
	defer te3.tearDown()
	ch3 <- sc
	tc = testpb.NewTestServiceClient(te3.clientConn())

	// Test for unary RPC recv.
	req.Payload = smallPayload
	req.ResponseSize = int32(largeSize)

	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
	}

	req.ResponseSize = int32(extraLargeSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
	}

	req.Payload = extraLargePayload
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv.
	stream, err = tc.FullDuplexCall(te3.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam[0].Size = int32(largeSize)
	sreq.Payload = smallPayload

	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
	}

	respParam[0].Size = int32(extraLargeSize)

	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te3.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	sreq.Payload = extraLargePayload
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}
  6393  
  6394  // TestMalformedStreamMethod starts a test server and sends an RPC with a
  6395  // malformed method name. The server should respond with an UNIMPLEMENTED status
  6396  // code in this case.
  6397  func (s) TestMalformedStreamMethod(t *testing.T) {
  6398  	const testMethod = "a-method-name-without-any-slashes"
  6399  	te := newTest(t, tcpClearRREnv)
  6400  	te.startServer(nil)
  6401  	defer te.tearDown()
  6402  
  6403  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6404  	defer cancel()
  6405  	err := te.clientConn().Invoke(ctx, testMethod, nil, nil)
  6406  	if gotCode := status.Code(err); gotCode != codes.Unimplemented {
  6407  		t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented)
  6408  	}
  6409  }
  6410  
  6411  func (s) TestMethodFromServerStream(t *testing.T) {
  6412  	const testMethod = "/package.service/method"
  6413  	e := tcpClearRREnv
  6414  	te := newTest(t, e)
  6415  	var method string
  6416  	var ok bool
  6417  	te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error {
  6418  		method, ok = grpc.MethodFromServerStream(stream)
  6419  		return nil
  6420  	}
  6421  
  6422  	te.startServer(nil)
  6423  	defer te.tearDown()
  6424  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6425  	defer cancel()
  6426  	_ = te.clientConn().Invoke(ctx, testMethod, nil, nil)
  6427  	if !ok || method != testMethod {
  6428  		t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod)
  6429  	}
  6430  }
  6431  
// TestInterceptorCanAccessCallOptions installs unary and stream client
// interceptors that never forward the RPC; they only record the CallOptions
// they receive. It then verifies that both the per-ClientConn default options
// (WithDefaultCallOptions) and the per-call options are visible to the
// interceptors, with the defaults appearing first.
func (s) TestInterceptorCanAccessCallOptions(t *testing.T) {
	e := tcpClearRREnv
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	// observedOptions collects the values carried by each recognized
	// CallOption type, in the order the options were seen.
	type observedOptions struct {
		headers     []*metadata.MD
		trailers    []*metadata.MD
		peer        []*peer.Peer
		creds       []credentials.PerRPCCredentials
		failFast    []bool
		maxRecvSize []int
		maxSendSize []int
		compressor  []string
		subtype     []string
	}
	var observedOpts observedOptions
	// populateOpts type-switches over the concrete CallOption structs exported
	// by the grpc package and appends each one's payload to observedOpts.
	populateOpts := func(opts []grpc.CallOption) {
		for _, o := range opts {
			switch o := o.(type) {
			case grpc.HeaderCallOption:
				observedOpts.headers = append(observedOpts.headers, o.HeaderAddr)
			case grpc.TrailerCallOption:
				observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr)
			case grpc.PeerCallOption:
				observedOpts.peer = append(observedOpts.peer, o.PeerAddr)
			case grpc.PerRPCCredsCallOption:
				observedOpts.creds = append(observedOpts.creds, o.Creds)
			case grpc.FailFastCallOption:
				observedOpts.failFast = append(observedOpts.failFast, o.FailFast)
			case grpc.MaxRecvMsgSizeCallOption:
				observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize)
			case grpc.MaxSendMsgSizeCallOption:
				observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize)
			case grpc.CompressorCallOption:
				observedOpts.compressor = append(observedOpts.compressor, o.CompressorType)
			case grpc.ContentSubtypeCallOption:
				observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype)
			}
		}
	}

	// Neither interceptor invokes the next handler, so no RPC actually goes
	// out on the wire; the calls below return immediately.
	te.unaryClientInt = func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		populateOpts(opts)
		return nil
	}
	te.streamClientInt = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		populateOpts(opts)
		return nil, nil
	}

	// Defaults are combined with (and precede) the per-call options below.
	defaults := []grpc.CallOption{
		grpc.WaitForReady(true),
		grpc.MaxCallRecvMsgSize(1010),
	}
	tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...)))

	var headers metadata.MD
	var trailers metadata.MD
	var pr peer.Peer
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	_, _ = tc.UnaryCall(ctx, &testpb.SimpleRequest{},
		grpc.MaxCallRecvMsgSize(100),
		grpc.MaxCallSendMsgSize(200),
		grpc.PerRPCCredentials(testPerRPCCredentials{}),
		grpc.Header(&headers),
		grpc.Trailer(&trailers),
		grpc.Peer(&pr))
	// WaitForReady(true) surfaces as FailFast=false; the default recv size
	// (1010) is observed before the per-call one (100).
	expected := observedOptions{
		failFast:    []bool{false},
		maxRecvSize: []int{1010, 100},
		maxSendSize: []int{200},
		creds:       []credentials.PerRPCCredentials{testPerRPCCredentials{}},
		headers:     []*metadata.MD{&headers},
		trailers:    []*metadata.MD{&trailers},
		peer:        []*peer.Peer{&pr},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}

	observedOpts = observedOptions{} // reset

	_, _ = tc.StreamingInputCall(ctx,
		grpc.WaitForReady(false),
		grpc.MaxCallSendMsgSize(2020),
		grpc.UseCompressor("comp-type"),
		grpc.CallContentSubtype("json"))
	// Default WaitForReady(true) followed by per-call WaitForReady(false)
	// yields failFast {false, true}.
	expected = observedOptions{
		failFast:    []bool{false, true},
		maxRecvSize: []int{1010},
		maxSendSize: []int{2020},
		compressor:  []string{"comp-type"},
		subtype:     []string{"json"},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}
}
  6535  
  6536  func (s) TestCompressorRegister(t *testing.T) {
  6537  	for _, e := range listTestEnv() {
  6538  		testCompressorRegister(t, e)
  6539  	}
  6540  }
  6541  
  6542  func testCompressorRegister(t *testing.T, e env) {
  6543  	te := newTest(t, e)
  6544  	te.clientCompression = false
  6545  	te.serverCompression = false
  6546  	te.clientUseCompression = true
  6547  
  6548  	te.startServer(&testServer{security: e.security})
  6549  	defer te.tearDown()
  6550  	tc := testpb.NewTestServiceClient(te.clientConn())
  6551  
  6552  	// Unary call
  6553  	const argSize = 271828
  6554  	const respSize = 314159
  6555  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  6556  	if err != nil {
  6557  		t.Fatal(err)
  6558  	}
  6559  	req := &testpb.SimpleRequest{
  6560  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6561  		ResponseSize: respSize,
  6562  		Payload:      payload,
  6563  	}
  6564  	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
  6565  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  6566  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  6567  	}
  6568  	// Streaming RPC
  6569  	ctx, cancel := context.WithCancel(context.Background())
  6570  	defer cancel()
  6571  	stream, err := tc.FullDuplexCall(ctx)
  6572  	if err != nil {
  6573  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6574  	}
  6575  	respParam := []*testpb.ResponseParameters{
  6576  		{
  6577  			Size: 31415,
  6578  		},
  6579  	}
  6580  	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
  6581  	if err != nil {
  6582  		t.Fatal(err)
  6583  	}
  6584  	sreq := &testpb.StreamingOutputCallRequest{
  6585  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  6586  		ResponseParameters: respParam,
  6587  		Payload:            payload,
  6588  	}
  6589  	if err := stream.Send(sreq); err != nil {
  6590  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6591  	}
  6592  	if _, err := stream.Recv(); err != nil {
  6593  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  6594  	}
  6595  }
  6596  
  6597  func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
  6598  	ss := &stubserver.StubServer{
  6599  		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
  6600  			return &testpb.Empty{}, nil
  6601  		},
  6602  	}
  6603  
  6604  	s := grpc.NewServer()
  6605  	defer s.Stop()
  6606  	testpb.RegisterTestServiceServer(s, ss)
  6607  
  6608  	lis, err := net.Listen("tcp", "localhost:0")
  6609  	if err != nil {
  6610  		t.Fatalf("Failed to create listener: %v", err)
  6611  	}
  6612  
  6613  	done := make(chan struct{})
  6614  	go func() {
  6615  		_ = s.Serve(lis)
  6616  		close(done)
  6617  	}()
  6618  
  6619  	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
  6620  	//cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
  6621  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
  6622  	if err != nil {
  6623  		t.Fatalf("Failed to dial server: %v", err)
  6624  	}
  6625  	defer func(cc *grpc.ClientConn) {
  6626  		_ = cc.Close()
  6627  	}(cc)
  6628  	c := testpb.NewTestServiceClient(cc)
  6629  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6630  	defer cancel()
  6631  	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  6632  		t.Fatalf("Failed to send test RPC to server: %v", err)
  6633  	}
  6634  
  6635  	if err := lis.Close(); err != nil {
  6636  		t.Fatalf("Failed to close listener: %v", err)
  6637  	}
  6638  	const timeout = 5 * time.Second
  6639  	timer := time.NewTimer(timeout)
  6640  	select {
  6641  	case <-done:
  6642  		return
  6643  	case <-timer.C:
  6644  		t.Fatalf("Serve did not return after %v", timeout)
  6645  	}
  6646  }
  6647  
  6648  // Service handler returns status with invalid utf8 message.
  6649  func (s) TestStatusInvalidUTF8Message(t *testing.T) {
  6650  	var (
  6651  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  6652  		wantMsg = "���"
  6653  	)
  6654  
  6655  	ss := &stubserver.StubServer{
  6656  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  6657  			return nil, status.Errorf(codes.Internal, origMsg)
  6658  		},
  6659  	}
  6660  	if err := ss.Start(nil); err != nil {
  6661  		t.Fatalf("Error starting endpoint server: %v", err)
  6662  	}
  6663  	defer ss.Stop()
  6664  
  6665  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  6666  	defer cancel()
  6667  
  6668  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
  6669  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg)
  6670  	}
  6671  }
  6672  
  6673  // Service handler returns status with details and invalid utf8 message. Proto
  6674  // will fail to marshal the status because of the invalid utf8 message. Details
  6675  // will be dropped when sending.
  6676  func (s) TestStatusInvalidUTF8Details(t *testing.T) {
  6677  	grpctest.TLogger.ExpectError("transport: failed to marshal rpc status")
  6678  
  6679  	var (
  6680  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  6681  		wantMsg = "���"
  6682  	)
  6683  
  6684  	ss := &stubserver.StubServer{
  6685  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  6686  			st := status.New(codes.Internal, origMsg)
  6687  			st, err := st.WithDetails(&testpb.Empty{})
  6688  			if err != nil {
  6689  				return nil, err
  6690  			}
  6691  			return nil, st.Err()
  6692  		},
  6693  	}
  6694  	if err := ss.Start(nil); err != nil {
  6695  		t.Fatalf("Error starting endpoint server: %v", err)
  6696  	}
  6697  	defer ss.Stop()
  6698  
  6699  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  6700  	defer cancel()
  6701  
  6702  	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
  6703  	st := status.Convert(err)
  6704  	if st.Message() != wantMsg {
  6705  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg)
  6706  	}
  6707  	if len(st.Details()) != 0 {
  6708  		// Details should be dropped on the server side.
  6709  		t.Fatalf("RPC status contain details: %v, want no details", st.Details())
  6710  	}
  6711  }
  6712  
  6713  func (s) TestClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T) {
  6714  	for _, e := range listTestEnv() {
  6715  		if e.httpHandler {
  6716  			continue
  6717  		}
  6718  		testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t, e)
  6719  	}
  6720  }
  6721  
  6722  func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e env) {
  6723  	te := newTest(t, e)
  6724  	te.userAgent = testAppUA
  6725  	smallSize := 1024
  6726  	te.maxServerReceiveMsgSize = &smallSize
  6727  	te.startServer(&testServer{security: e.security})
  6728  	defer te.tearDown()
  6729  	tc := testpb.NewTestServiceClient(te.clientConn())
  6730  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576)
  6731  	if err != nil {
  6732  		t.Fatal(err)
  6733  	}
  6734  	req := &testpb.SimpleRequest{
  6735  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6736  		Payload:      payload,
  6737  	}
  6738  	var wg sync.WaitGroup
  6739  	for i := 0; i < 10; i++ {
  6740  		wg.Add(1)
  6741  		go func() {
  6742  			defer wg.Done()
  6743  			for j := 0; j < 100; j++ {
  6744  				ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
  6745  				//goland:noinspection GoDeferInLoop
  6746  				defer cancel()
  6747  				if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.ResourceExhausted {
  6748  					t.Errorf("TestService/UnaryCall(_,_) = _. %v, want code: %s", err, codes.ResourceExhausted)
  6749  					return
  6750  				}
  6751  			}
  6752  		}()
  6753  	}
  6754  	wg.Wait()
  6755  }
  6756  
  6757  func (s) TestRPCTimeout(t *testing.T) {
  6758  	for _, e := range listTestEnv() {
  6759  		testRPCTimeout(t, e)
  6760  	}
  6761  }
  6762  
  6763  func testRPCTimeout(t *testing.T, e env) {
  6764  	te := newTest(t, e)
  6765  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond})
  6766  	defer te.tearDown()
  6767  
  6768  	cc := te.clientConn()
  6769  	tc := testpb.NewTestServiceClient(cc)
  6770  
  6771  	const argSize = 2718
  6772  	const respSize = 314
  6773  
  6774  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  6775  	if err != nil {
  6776  		t.Fatal(err)
  6777  	}
  6778  
  6779  	req := &testpb.SimpleRequest{
  6780  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6781  		ResponseSize: respSize,
  6782  		Payload:      payload,
  6783  	}
  6784  	for i := -1; i <= 10; i++ {
  6785  		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
  6786  		if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.DeadlineExceeded {
  6787  			t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want <nil>, error code: %s", err, codes.DeadlineExceeded)
  6788  		}
  6789  		cancel()
  6790  	}
  6791  }
  6792  
  6793  func (s) TestDisabledIOBuffers(t *testing.T) {
  6794  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
  6795  	if err != nil {
  6796  		t.Fatalf("Failed to create payload: %v", err)
  6797  	}
  6798  	req := &testpb.StreamingOutputCallRequest{
  6799  		Payload: payload,
  6800  	}
  6801  	resp := &testpb.StreamingOutputCallResponse{
  6802  		Payload: payload,
  6803  	}
  6804  
  6805  	ss := &stubserver.StubServer{
  6806  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  6807  			for {
  6808  				in, err := stream.Recv()
  6809  				if err == io.EOF {
  6810  					return nil
  6811  				}
  6812  				if err != nil {
  6813  					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
  6814  					return err
  6815  				}
  6816  				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
  6817  					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
  6818  					return err
  6819  				}
  6820  				if err := stream.Send(resp); err != nil {
  6821  					t.Errorf("stream.Send(_)= %v, want <nil>", err)
  6822  					return err
  6823  				}
  6824  
  6825  			}
  6826  		},
  6827  	}
  6828  
  6829  	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
  6830  	testpb.RegisterTestServiceServer(s, ss)
  6831  
  6832  	lis, err := net.Listen("tcp", "localhost:0")
  6833  	if err != nil {
  6834  		t.Fatalf("Failed to create listener: %v", err)
  6835  	}
  6836  
  6837  	go func() {
  6838  		_ = s.Serve(lis)
  6839  	}()
  6840  	defer s.Stop()
  6841  	dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second)
  6842  	defer dcancel()
  6843  	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
  6844  	//cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
  6845  	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
  6846  	if err != nil {
  6847  		t.Fatalf("Failed to dial server")
  6848  	}
  6849  	defer func(cc *grpc.ClientConn) {
  6850  		_ = cc.Close()
  6851  	}(cc)
  6852  	c := testpb.NewTestServiceClient(cc)
  6853  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6854  	defer cancel()
  6855  	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
  6856  	if err != nil {
  6857  		t.Fatalf("Failed to send test RPC to server")
  6858  	}
  6859  	for i := 0; i < 10; i++ {
  6860  		if err := stream.Send(req); err != nil {
  6861  			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
  6862  		}
  6863  		in, err := stream.Recv()
  6864  		if err != nil {
  6865  			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
  6866  		}
  6867  		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
  6868  			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
  6869  		}
  6870  	}
  6871  	_ = stream.CloseSend()
  6872  	if _, err := stream.Recv(); err != io.EOF {
  6873  		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
  6874  	}
  6875  }
  6876  
  6877  func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
  6878  	for _, e := range listTestEnv() {
  6879  		if e.httpHandler {
  6880  			continue
  6881  		}
  6882  		testServerMaxHeaderListSizeClientUserViolation(t, e)
  6883  	}
  6884  }
  6885  
  6886  func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
  6887  	te := newTest(t, e)
  6888  	te.maxServerHeaderListSize = new(uint32)
  6889  	*te.maxServerHeaderListSize = 216
  6890  	te.startServer(&testServer{security: e.security})
  6891  	defer te.tearDown()
  6892  
  6893  	cc := te.clientConn()
  6894  	tc := testpb.NewTestServiceClient(cc)
  6895  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6896  	defer cancel()
  6897  	metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
  6898  	var err error
  6899  	if err = verifyResultWithDelay(func() (bool, error) {
  6900  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  6901  			return true, nil
  6902  		}
  6903  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  6904  	}); err != nil {
  6905  		t.Fatal(err)
  6906  	}
  6907  }
  6908  
  6909  func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
  6910  	for _, e := range listTestEnv() {
  6911  		if e.httpHandler {
  6912  			continue
  6913  		}
  6914  		testClientMaxHeaderListSizeServerUserViolation(t, e)
  6915  	}
  6916  }
  6917  
  6918  func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
  6919  	te := newTest(t, e)
  6920  	te.maxClientHeaderListSize = new(uint32)
  6921  	*te.maxClientHeaderListSize = 1 // any header server sends will violate
  6922  	te.startServer(&testServer{security: e.security})
  6923  	defer te.tearDown()
  6924  
  6925  	cc := te.clientConn()
  6926  	tc := testpb.NewTestServiceClient(cc)
  6927  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6928  	defer cancel()
  6929  	var err error
  6930  	if err = verifyResultWithDelay(func() (bool, error) {
  6931  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  6932  			return true, nil
  6933  		}
  6934  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  6935  	}); err != nil {
  6936  		t.Fatal(err)
  6937  	}
  6938  }
  6939  
  6940  func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
  6941  	for _, e := range listTestEnv() {
  6942  		if e.httpHandler || e.security == "tls" {
  6943  			continue
  6944  		}
  6945  		testServerMaxHeaderListSizeClientIntentionalViolation(t, e)
  6946  	}
  6947  }
  6948  
// testServerMaxHeaderListSizeClientIntentionalViolation opens a stream and
// then writes, over the raw client-side connection, a HEADERS frame that
// deliberately exceeds the server's maxHeaderListSize; the stream is expected
// to fail with an Internal status.
func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxServerHeaderListSize = new(uint32)
	*te.maxServerHeaderListSize = 512
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc, dw := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	rcw := dw.getRawConnWrapper()
	// Build a 512-byte header value ("aaa…"); together with the header name it
	// pushes the header list past the server's 512-byte limit.
	val := make([]string, 512)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header
	time.Sleep(100 * time.Millisecond)
	_ = rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
  6981  
  6982  func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
  6983  	for _, e := range listTestEnv() {
  6984  		if e.httpHandler || e.security == "tls" {
  6985  			continue
  6986  		}
  6987  		testClientMaxHeaderListSizeServerIntentionalViolation(t, e)
  6988  	}
  6989  }
  6990  
// testClientMaxHeaderListSizeServerIntentionalViolation opens a stream and
// then writes, over the raw server-side connection, a HEADERS frame that
// deliberately exceeds the client's maxHeaderListSize; the stream is expected
// to fail with an Internal status.
func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxClientHeaderListSize = new(uint32)
	*te.maxClientHeaderListSize = 200
	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
	defer te.tearDown()
	cc, _ := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	// Poll (up to ~1s) for the server transport's raw connection wrapper to
	// become available.
	var i int
	var rcw *rawConnWrapper
	for i = 0; i < 100; i++ {
		rcw = lw.getLastConn()
		if rcw != nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
		continue
	}
	if i == 100 {
		t.Fatalf("failed to create server transport after 1s")
	}

	// A 200-byte header value plus the header name exceeds the client's
	// 200-byte limit.
	val := make([]string, 200)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header.
	time.Sleep(100 * time.Millisecond)
	_ = rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
  7035  
  7036  func (s) TestNetPipeConn(t *testing.T) {
  7037  	// This test will block indefinitely if grpc writes both client and server
  7038  	// prefaces without either reading from the Conn.
  7039  	pl := testutils.NewPipeListener()
  7040  	s := grpc.NewServer()
  7041  	defer s.Stop()
  7042  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  7043  		return &testpb.SimpleResponse{}, nil
  7044  	}}
  7045  	testpb.RegisterTestServiceServer(s, ts)
  7046  	go func() {
  7047  		_ = s.Serve(pl)
  7048  	}()
  7049  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  7050  	defer cancel()
  7051  	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
  7052  	// grpc.WithDialer is deprecated, use WithContextDialer instead.
  7053  	//cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer()))
  7054  	cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
  7055  		return pl.Dialer()(addr, 0)
  7056  	}))
  7057  	if err != nil {
  7058  		t.Fatalf("Error creating client: %v", err)
  7059  	}
  7060  	defer func(cc *grpc.ClientConn) {
  7061  		_ = cc.Close()
  7062  	}(cc)
  7063  	client := testpb.NewTestServiceClient(cc)
  7064  	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7065  		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
  7066  	}
  7067  }
  7068  
  7069  func (s) TestLargeTimeout(t *testing.T) {
  7070  	for _, e := range listTestEnv() {
  7071  		testLargeTimeout(t, e)
  7072  	}
  7073  }
  7074  
  7075  func testLargeTimeout(t *testing.T, e env) {
  7076  	te := newTest(t, e)
  7077  	te.declareLogNoise("Server.processUnaryRPC failed to write status")
  7078  
  7079  	ts := &funcServer{}
  7080  	te.startServer(ts)
  7081  	defer te.tearDown()
  7082  	tc := testpb.NewTestServiceClient(te.clientConn())
  7083  
  7084  	timeouts := []time.Duration{
  7085  		time.Duration(math.MaxInt64), // will be (correctly) converted to
  7086  		// 2562048 hours, which overflows upon converting back to an int64
  7087  		2562047 * time.Hour, // the largest timeout that does not overflow
  7088  	}
  7089  
  7090  	for i, maxTimeout := range timeouts {
  7091  		ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  7092  			deadline, ok := ctx.Deadline()
  7093  			timeout := time.Until(deadline)
  7094  			minTimeout := maxTimeout - 5*time.Second
  7095  			if !ok || timeout < minTimeout || timeout > maxTimeout {
  7096  				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
  7097  				return nil, status.Error(codes.OutOfRange, "deadline error")
  7098  			}
  7099  			return &testpb.SimpleResponse{}, nil
  7100  		}
  7101  
  7102  		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
  7103  		//goland:noinspection GoDeferInLoop
  7104  		defer cancel()
  7105  
  7106  		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7107  			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
  7108  		}
  7109  	}
  7110  }
  7111  
// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This
// test ensures that the connection is re-created after GO_AWAY and not affected by the
// subsequent (old) connection closure.
func (s) TestGoAwayThenClose(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	lis1, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	s1 := grpc.NewServer()
	defer s1.Stop()
	// Shared service implementation for both servers: unary calls succeed
	// immediately; the duplex handler sends one message and then blocks in
	// Recv until the client or connection goes away.
	ts := &funcServer{
		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
			return &testpb.SimpleResponse{}, nil
		},
		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
			if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil {
				t.Errorf("unexpected error from send: %v", err)
				return err
			}
			// Wait forever.
			_, err := stream.Recv()
			if err == nil {
				t.Error("expected to never receive any message")
			}
			return err
		},
	}
	testpb.RegisterTestServiceServer(s1, ts)
	go func() {
		_ = s1.Serve(lis1)
	}()

	// Server 2 is registered but not serving yet; its listener fires
	// conn2Established when a connection is accepted.
	conn2Established := grpcsync.NewEvent()
	lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established)
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	s2 := grpc.NewServer()
	defer s2.Stop()
	testpb.RegisterTestServiceServer(s2, ts)

	// The manual resolver hands the client both server addresses up front.
	r := manual.NewBuilderWithScheme("whatever")
	r.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: lis1.Addr().String()},
		{Addr: lis2.Addr().String()},
	}})
	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
	//cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure())
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("Error creating client: %v", err)
	}
	defer func(cc *grpc.ClientConn) {
		_ = cc.Close()
	}(cc)

	client := testpb.NewTestServiceClient(cc)

	// We make a streaming RPC and do an one-message-round-trip to make sure
	// it's created on connection 1.
	//
	// We use a long-lived RPC because it will cause GracefulStop to send
	// GO_AWAY, but the connection doesn't get closed until the server stops and
	// the client receives the error.
	stream, err := client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err)
	}
	if _, err = stream.Recv(); err != nil {
		t.Fatalf("unexpected error from first recv: %v", err)
	}

	go func() {
		_ = s2.Serve(lis2)
	}()

	// Send GO_AWAY to connection 1.
	go s1.GracefulStop()

	// Wait for the ClientConn to enter IDLE state.
	state := cc.GetState()
	for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
	}
	if state != connectivity.Idle {
		t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state)
	}

	// Initiate another RPC to create another connection.
	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
	}

	// Assert that connection 2 has been established.
	<-conn2Established.Done()

	// Close the listener for server2 to prevent it from allowing new connections.
	_ = lis2.Close()

	// Close connection 1.
	s1.Stop()

	// Wait for client to close.
	if _, err = stream.Recv(); err == nil {
		t.Fatal("expected the stream to die, but got a successful Recv")
	}

	// Do a bunch of RPCs, make sure it stays stable. These should go to connection 2.
	for i := 0; i < 10; i++ {
		if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
		}
	}
}
  7228  
  7229  func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
  7230  	lis, err := net.Listen(network, address)
  7231  	if err != nil {
  7232  		return nil, err
  7233  	}
  7234  	return notifyingListener{connEstablished: event, Listener: lis}, nil
  7235  }
  7236  
// notifyingListener wraps a net.Listener and fires connEstablished each time
// Accept is invoked, so tests can detect that a connection attempt reached
// the server.
type notifyingListener struct {
	// connEstablished is fired on every Accept call (see Accept below).
	connEstablished *grpcsync.Event
	net.Listener
}
  7241  
  7242  func (lis notifyingListener) Accept() (net.Conn, error) {
  7243  	defer lis.connEstablished.Fire()
  7244  	return lis.Listener.Accept()
  7245  }
  7246  
  7247  func (s) TestRPCWaitsForResolver(t *testing.T) {
  7248  	te := testServiceConfigSetup(t, tcpClearRREnv)
  7249  	te.startServer(&testServer{security: tcpClearRREnv.security})
  7250  	defer te.tearDown()
  7251  	r := manual.NewBuilderWithScheme("whatever")
  7252  
  7253  	te.resolverScheme = r.Scheme()
  7254  	cc := te.clientConn(grpc.WithResolvers(r))
  7255  	tc := testpb.NewTestServiceClient(cc)
  7256  
  7257  	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
  7258  	defer cancel()
  7259  	// With no resolved addresses yet, this will timeout.
  7260  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  7261  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  7262  	}
  7263  
  7264  	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
  7265  	defer cancel()
  7266  	go func() {
  7267  		time.Sleep(time.Second)
  7268  		r.UpdateState(resolver.State{
  7269  			Addresses: []resolver.Address{{Addr: te.srvAddr}},
  7270  			ServiceConfig: parseCfg(r, `{
  7271  		    "methodConfig": [
  7272  		        {
  7273  		            "name": [
  7274  		                {
  7275  		                    "service": "grpc.testing.TestService",
  7276  		                    "method": "UnaryCall"
  7277  		                }
  7278  		            ],
  7279                      "maxRequestMessageBytes": 0
  7280  		        }
  7281  		    ]
  7282  		}`)})
  7283  	}()
  7284  	// We wait a second before providing a service config and resolving
  7285  	// addresses.  So this will wait for that and then honor the
  7286  	// maxRequestMessageBytes it contains.
  7287  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted {
  7288  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
  7289  	}
  7290  	if got := ctx.Err(); got != nil {
  7291  		t.Fatalf("ctx.Err() = %v; want nil (deadline should be set short by service config)", got)
  7292  	}
  7293  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7294  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
  7295  	}
  7296  }
  7297  
  7298  func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) {
  7299  	// Non-gRPC content-type fallback path.
  7300  	for httpCode := range transport.HTTPStatusConvTab {
  7301  		doHTTPHeaderTest(t, transport.HTTPStatusConvTab[httpCode], []string{
  7302  			":status", fmt.Sprintf("%d", httpCode),
  7303  			"content-type", "text/html", // non-gRPC content type to switch to HTTP mode.
  7304  			"grpc-status", "1", // Make up a gRPC status error
  7305  			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
  7306  		})
  7307  	}
  7308  
  7309  	// Missing content-type fallback path.
  7310  	for httpCode := range transport.HTTPStatusConvTab {
  7311  		doHTTPHeaderTest(t, transport.HTTPStatusConvTab[httpCode], []string{
  7312  			":status", fmt.Sprintf("%d", httpCode),
  7313  			// Omitting content type to switch to HTTP mode.
  7314  			"grpc-status", "1", // Make up a gRPC status error
  7315  			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
  7316  		})
  7317  	}
  7318  
  7319  	// Malformed HTTP status when fallback.
  7320  	doHTTPHeaderTest(t, codes.Internal, []string{
  7321  		":status", "abc",
  7322  		// Omitting content type to switch to HTTP mode.
  7323  		"grpc-status", "1", // Make up a gRPC status error
  7324  		"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
  7325  	})
  7326  }
  7327  
  7328  // Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame).
  7329  func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) {
  7330  	for _, test := range []struct {
  7331  		header  []string
  7332  		errCode codes.Code
  7333  	}{
  7334  		{
  7335  			// missing gRPC status.
  7336  			header: []string{
  7337  				":status", "403",
  7338  				"content-type", "application/grpc",
  7339  			},
  7340  			errCode: codes.PermissionDenied,
  7341  		},
  7342  		{
  7343  			// malformed grpc-status.
  7344  			header: []string{
  7345  				":status", "502",
  7346  				"content-type", "application/grpc",
  7347  				"grpc-status", "abc",
  7348  			},
  7349  			errCode: codes.Internal,
  7350  		},
  7351  		{
  7352  			// Malformed grpc-tags-bin field.
  7353  			header: []string{
  7354  				":status", "502",
  7355  				"content-type", "application/grpc",
  7356  				"grpc-status", "0",
  7357  				"grpc-tags-bin", "???",
  7358  			},
  7359  			errCode: codes.Unavailable,
  7360  		},
  7361  		{
  7362  			// gRPC status error.
  7363  			header: []string{
  7364  				":status", "502",
  7365  				"content-type", "application/grpc",
  7366  				"grpc-status", "3",
  7367  			},
  7368  			errCode: codes.Unavailable,
  7369  		},
  7370  	} {
  7371  		doHTTPHeaderTest(t, test.errCode, test.header)
  7372  	}
  7373  }
  7374  
  7375  // Testing non-Trailers-only Trailers (delivered in second HEADERS frame)
  7376  func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) {
  7377  	for _, test := range []struct {
  7378  		responseHeader []string
  7379  		trailer        []string
  7380  		errCode        codes.Code
  7381  	}{
  7382  		{
  7383  			responseHeader: []string{
  7384  				":status", "200",
  7385  				"content-type", "application/grpc",
  7386  			},
  7387  			trailer: []string{
  7388  				// trailer missing grpc-status
  7389  				":status", "502",
  7390  			},
  7391  			errCode: codes.Unavailable,
  7392  		},
  7393  		{
  7394  			responseHeader: []string{
  7395  				":status", "404",
  7396  				"content-type", "application/grpc",
  7397  			},
  7398  			trailer: []string{
  7399  				// malformed grpc-status-details-bin field
  7400  				"grpc-status", "0",
  7401  				"grpc-status-details-bin", "????",
  7402  			},
  7403  			errCode: codes.Unimplemented,
  7404  		},
  7405  		{
  7406  			responseHeader: []string{
  7407  				":status", "200",
  7408  				"content-type", "application/grpc",
  7409  			},
  7410  			trailer: []string{
  7411  				// malformed grpc-status-details-bin field
  7412  				"grpc-status", "0",
  7413  				"grpc-status-details-bin", "????",
  7414  			},
  7415  			errCode: codes.Internal,
  7416  		},
  7417  	} {
  7418  		doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer)
  7419  	}
  7420  }
  7421  
  7422  func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) {
  7423  	header := []string{
  7424  		":status", "200",
  7425  		"content-type", "application/grpc",
  7426  	}
  7427  	doHTTPHeaderTest(t, codes.Internal, header, header, header)
  7428  }
  7429  
// httpServerResponse describes one canned response replayed by httpServer:
// zero or more HEADERS frames, an optional DATA payload, then trailing
// HEADERS frames (the last trailer is sent with END_STREAM — see start).
type httpServerResponse struct {
	headers  [][]string
	payload  []byte
	trailers [][]string
}
  7435  
// httpServer is a minimal hand-rolled HTTP/2 server used to feed the client
// arbitrary (including malformed) frames.  See start for the serving loop.
type httpServer struct {
	// If waitForEndStream is set, wait for the client to send a frame with end
	// stream in it before sending a response/refused stream.
	waitForEndStream bool
	// refuseStream, when non-nil and returning true for a stream ID, makes the
	// server reset that stream with RST_STREAM(REFUSED_STREAM) instead of
	// responding.
	refuseStream     func(uint32) bool
	// responses are replayed in order and cycled once exhausted.
	responses        []httpServerResponse
}
  7443  
  7444  func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error {
  7445  	if len(headerFields)%2 == 1 {
  7446  		panic("odd number of kv args")
  7447  	}
  7448  
  7449  	var buf bytes.Buffer
  7450  	henc := hpack.NewEncoder(&buf)
  7451  	for len(headerFields) > 0 {
  7452  		k, v := headerFields[0], headerFields[1]
  7453  		headerFields = headerFields[2:]
  7454  		_ = henc.WriteField(hpack.HeaderField{Name: k, Value: v})
  7455  	}
  7456  
  7457  	return framer.WriteHeaders(http2.HeadersFrameParam{
  7458  		StreamID:      sid,
  7459  		BlockFragment: buf.Bytes(),
  7460  		EndStream:     endStream,
  7461  		EndHeaders:    true,
  7462  	})
  7463  }
  7464  
// writePayload sends payload as a single DATA frame (without END_STREAM) on
// stream sid.
func (s *httpServer) writePayload(framer *http2.Framer, sid uint32, payload []byte) error {
	return framer.WriteData(sid, false, payload)
}
  7468  
// start launches a goroutine speaking raw HTTP/2 on the first connection
// accepted from lis: it completes the preface/settings exchange, then for
// each request stream replays the next entry of s.responses (headers,
// optional payload, trailers), cycling once the list is exhausted.  It exits
// when the framer reports io.EOF.
func (s *httpServer) start(t *testing.T, lis net.Listener) {
	// Launch an HTTP server to send back header.
	go func() {
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error accepting connection: %v", err)
			return
		}
		defer func(conn net.Conn) {
			_ = conn.Close()
		}(conn)
		// Read preface sent by client.
		if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil {
			t.Errorf("Error at server-side while reading preface from client. Err: %v", err)
			return
		}
		reader := bufio.NewReader(conn)
		writer := bufio.NewWriter(conn)
		framer := http2.NewFramer(writer, reader)
		if err = framer.WriteSettingsAck(); err != nil {
			t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)
			return
		}
		_ = writer.Flush() // necessary since client is expecting preface before declaring connection fully setup.

		var sid uint32
		// Loop until conn is closed and framer returns io.EOF
		for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) {
			// Read frames until a header is received.
			for {
				frame, err := framer.ReadFrame()
				if err != nil {
					if err != io.EOF {
						t.Errorf("Error at server-side while reading frame. Err: %v", err)
					}
					return
				}
				// sid stays 0 until we see the frame that should trigger a
				// response; a non-zero sid below means "respond now".
				sid = 0
				switch fr := frame.(type) {
				case *http2.HeadersFrame:
					// Respond after this if we are not waiting for an end
					// stream or if this frame ends it.
					if !s.waitForEndStream || fr.StreamEnded() {
						sid = fr.Header().StreamID
					}

				case *http2.DataFrame:
					// Respond after this if we were waiting for an end stream
					// and this frame ends it.  (If we were not waiting for an
					// end stream, this stream was already responded to when
					// the headers were received.)
					if s.waitForEndStream && fr.StreamEnded() {
						sid = fr.Header().StreamID
					}
				}
				if sid != 0 {
					if s.refuseStream == nil || !s.refuseStream(sid) {
						break
					}
					// Refused: reset the stream and keep reading frames.
					_ = framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream)
					_ = writer.Flush()
				}
			}

			// Replay the canned response: headers, optional payload, then
			// trailers (the last trailer ends the stream).
			response := s.responses[requestNum]
			for _, header := range response.headers {
				if err = s.writeHeader(framer, sid, header, false); err != nil {
					t.Errorf("Error at server-side while writing headers. Err: %v", err)
					return
				}
				_ = writer.Flush()
			}
			if response.payload != nil {
				if err = s.writePayload(framer, sid, response.payload); err != nil {
					t.Errorf("Error at server-side while writing payload. Err: %v", err)
					return
				}
				_ = writer.Flush()
			}
			for i, trailer := range response.trailers {
				if err = s.writeHeader(framer, sid, trailer, i == len(response.trailers)-1); err != nil {
					t.Errorf("Error at server-side while writing trailers. Err: %v", err)
					return
				}
				_ = writer.Flush()
			}
		}
	}()
}
  7558  
// doHTTPHeaderTest starts a hand-rolled HTTP/2 server that answers a
// FullDuplexCall with the given raw header frames (each headerFields element
// becomes one HEADERS frame, delivered as trailers), then asserts that the
// client stream fails with errCode.
func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) {
	t.Helper()
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen. Err: %v", err)
	}
	defer func(lis net.Listener) {
		_ = lis.Close()
	}(lis)
	server := &httpServer{
		responses: []httpServerResponse{{trailers: headerFields}},
	}
	server.start(t, lis)
	// grpc.WithInsecure is deprecated, use WithTransportCredentials and insecure.NewCredentials() instead.
	//cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("failed to dial due to err: %v", err)
	}
	defer func(cc *grpc.ClientConn) {
		_ = cc.Close()
	}(cc)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	client := testpb.NewTestServiceClient(cc)
	stream, err := client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("error creating stream due to err: %v", err)
	}
	// The error is surfaced on the first Recv, once the server's trailers
	// have been processed.
	if _, err := stream.Recv(); err == nil || status.Code(err) != errCode {
		t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode)
	}
}
  7592  
  7593  func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult {
  7594  	g := r.CC.ParseServiceConfig(s)
  7595  	if g.Err != nil {
  7596  		panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err))
  7597  	}
  7598  	return g
  7599  }
  7600  
  7601  func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
  7602  	wg := &sync.WaitGroup{}
  7603  	called, done := make(chan struct{}), make(chan struct{})
  7604  	ss := &stubserver.StubServer{
  7605  		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
  7606  			close(called)
  7607  			<-ctx.Done()
  7608  			err := ctx.Err()
  7609  			if !errors.Is(err, context.Canceled) {
  7610  				t.Errorf("ctx.Err() = %v; want context.Canceled", err)
  7611  			}
  7612  			close(done)
  7613  			return nil, err
  7614  		},
  7615  	}
  7616  	if err := ss.Start(nil); err != nil {
  7617  		t.Fatalf("Error starting endpoint server: %v", err)
  7618  	}
  7619  	defer ss.Stop()
  7620  
  7621  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  7622  
  7623  	wg.Add(1)
  7624  	go func() {
  7625  		if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
  7626  			t.Errorf("ss.Client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err)
  7627  		}
  7628  		wg.Done()
  7629  	}()
  7630  
  7631  	select {
  7632  	case <-called:
  7633  	case <-time.After(5 * time.Second):
  7634  		t.Fatalf("failed to perform EmptyCall after 10s")
  7635  	}
  7636  	cancel()
  7637  	select {
  7638  	case <-done:
  7639  	case <-time.After(5 * time.Second):
  7640  		t.Fatalf("server failed to close done chan due to cancellation propagation")
  7641  	}
  7642  	wg.Wait()
  7643  }
  7644  
  7645  type badGzipCompressor struct{}
  7646  
  7647  func (badGzipCompressor) Do(w io.Writer, p []byte) error {
  7648  	buf := &bytes.Buffer{}
  7649  	gzw := gzip.NewWriter(buf)
  7650  	if _, err := gzw.Write(p); err != nil {
  7651  		return err
  7652  	}
  7653  	err := gzw.Close()
  7654  	bs := buf.Bytes()
  7655  	if len(bs) >= 6 {
  7656  		bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte
  7657  	}
  7658  	_, _ = w.Write(bs)
  7659  	return err
  7660  }
  7661  
  7662  func (badGzipCompressor) Type() string {
  7663  	return "gzip"
  7664  }
  7665  
// TestGzipBadChecksum verifies that when the client's compressor produces a
// gzip body with a corrupted checksum, the RPC fails with codes.Internal and
// an error message containing gzip.ErrChecksum.
func (s) TestGzipBadChecksum(t *testing.T) {
	ss := &stubserver.StubServer{
		UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
			return &testpb.SimpleResponse{}, nil
		},
	}
	if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// A non-trivial payload ensures the corrupted compressed body is actually
	// decompressed server-side.
	p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024))
	if err != nil {
		t.Fatalf("Unexpected error from newPayload: %v", err)
	}
	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil ||
		status.Code(err) != codes.Internal ||
		!strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) {
		t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum)
	}
}
  7690  
  7691  // When an RPC is canceled, it's possible that the last Recv() returns before
  7692  // all call options' after are executed.
  7693  func (s) TestCanceledRPCCallOptionRace(t *testing.T) {
  7694  	ss := &stubserver.StubServer{
  7695  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  7696  			err := stream.Send(&testpb.StreamingOutputCallResponse{})
  7697  			if err != nil {
  7698  				return err
  7699  			}
  7700  			<-stream.Context().Done()
  7701  			return nil
  7702  		},
  7703  	}
  7704  	if err := ss.Start(nil); err != nil {
  7705  		t.Fatalf("Error starting endpoint server: %v", err)
  7706  	}
  7707  	defer ss.Stop()
  7708  
  7709  	const count = 1000
  7710  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  7711  	defer cancel()
  7712  
  7713  	var wg sync.WaitGroup
  7714  	wg.Add(count)
  7715  	for i := 0; i < count; i++ {
  7716  		go func() {
  7717  			defer wg.Done()
  7718  			var p peer.Peer
  7719  			ctx, cancel := context.WithCancel(ctx)
  7720  			defer cancel()
  7721  			stream, err := ss.Client.FullDuplexCall(ctx, grpc.Peer(&p))
  7722  			if err != nil {
  7723  				t.Errorf("_.FullDuplexCall(_) = _, %v", err)
  7724  				return
  7725  			}
  7726  			if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil {
  7727  				t.Errorf("_ has error %v while sending", err)
  7728  				return
  7729  			}
  7730  			if _, err := stream.Recv(); err != nil {
  7731  				t.Errorf("%v.Recv() = %v", stream, err)
  7732  				return
  7733  			}
  7734  			cancel()
  7735  			if _, err := stream.Recv(); status.Code(err) != codes.Canceled {
  7736  				t.Errorf("%v compleled with error %v, want %s", stream, err, codes.Canceled)
  7737  				return
  7738  			}
  7739  			// If recv returns before call options are executed, peer.Addr is not set,
  7740  			// fail the test.
  7741  			if p.Addr == nil {
  7742  				t.Errorf("peer.Addr is nil, want non-nil")
  7743  				return
  7744  			}
  7745  		}()
  7746  	}
  7747  	wg.Wait()
  7748  }
  7749  
// TestClientSettingsFloodCloseConn floods the server with SETTINGS frames
// until its reader stops draining, closes the connection, and then checks
// that GracefulStop still completes — i.e. the server tears down the
// transport rather than deadlocking.
func (s) TestClientSettingsFloodCloseConn(t *testing.T) {
	// Tests that the server properly closes its transport if the client floods
	// settings frames and then closes the connection.

	// Minimize buffer sizes to stimulate failure condition more quickly.
	s := grpc.NewServer(grpc.WriteBufferSize(20))
	l := bufconn.Listen(20)
	go func() {
		_ = s.Serve(l)
	}()

	// Dial our server and handshake.
	conn, err := l.Dial()
	if err != nil {
		t.Fatalf("Error dialing bufconn: %v", err)
	}

	n, err := conn.Write([]byte(http2.ClientPreface))
	if err != nil || n != len(http2.ClientPreface) {
		t.Fatalf("Error writing client preface: %v, %v", n, err)
	}

	fr := http2.NewFramer(conn, conn)
	f, err := fr.ReadFrame()
	if err != nil {
		t.Fatalf("Error reading initial settings frame: %v", err)
	}
	if _, ok := f.(*http2.SettingsFrame); ok {
		if err := fr.WriteSettingsAck(); err != nil {
			t.Fatalf("Error writing settings ack: %v", err)
		}
	} else {
		t.Fatalf("Error reading initial settings frame: type=%T", f)
	}

	// Confirm settings can be written, and that an ack is read.
	if err = fr.WriteSettings(); err != nil {
		t.Fatalf("Error writing settings frame: %v", err)
	}
	if f, err = fr.ReadFrame(); err != nil {
		t.Fatalf("Error reading frame: %v", err)
	}
	if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() {
		t.Fatalf("Unexpected frame: %v", f)
	}

	// Flood settings frames until a timeout occurs, indicating the server has
	// stopped reading from the connection, then close the conn.
	for {
		_ = conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))
		if err := fr.WriteSettings(); err != nil {
			if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() {
				t.Fatalf("Received unexpected write error: %v", err)
			}
			break
		}
	}
	_ = conn.Close()

	// If the server does not handle this situation correctly, it will never
	// close the transport.  This is because its loopyWriter.run() will have
	// exited, and thus not handle the goAway the draining process initiates.
	// Also, we would see a goroutine leak in this case, as the reader would be
	// blocked on the controlBuf's throttle() method indefinitely.

	timer := time.AfterFunc(5*time.Second, func() {
		t.Errorf("Timeout waiting for GracefulStop to return")
		s.Stop()
	})
	s.GracefulStop()
	timer.Stop()
}
  7822  
// TestDeadlineSetOnConnectionOnClientCredentialHandshake tests that there is a deadline
// set on the net.Conn when a credential handshake happens in http2_client.
func (s) TestDeadlineSetOnConnectionOnClientCredentialHandshake(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	connCh := make(chan net.Conn, 1)
	// Accept a single connection and hand it to the deferred cleanup below.
	go func() {
		defer close(connCh)
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error accepting connection: %v", err)
			return
		}
		connCh <- conn
	}()
	defer func() {
		conn := <-connCh
		if conn != nil {
			_ = conn.Close()
		}
	}()
	deadlineCh := testutils.NewChannel()
	cvd := &credentialsVerifyDeadline{
		deadlineCh: deadlineCh,
	}
	// Wrap the dialed conn in infoConn so SetDeadline calls are recorded and
	// visible to the ClientHandshake of cvd.
	dOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", addr)
		if err != nil {
			return nil, err
		}
		return &infoConn{Conn: conn}, nil
	})
	cc, err := grpc.Dial(lis.Addr().String(), dOpt, grpc.WithTransportCredentials(cvd))
	if err != nil {
		t.Fatalf("Failed to dial: %v", err)
	}
	defer func(cc *grpc.ClientConn) {
		_ = cc.Close()
	}(cc)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	deadline, err := deadlineCh.Receive(ctx)
	if err != nil {
		t.Fatalf("Error receiving from credsInvoked: %v", err)
	}
	// Default connection timeout is 20 seconds, so if the deadline exceeds now
	// + 18 seconds it should be valid.
	if !deadline.(time.Time).After(time.Now().Add(time.Second * 18)) {
		t.Fatalf("Connection did not have deadline set.")
	}
}
  7877  
// infoConn wraps a net.Conn and records the most recent deadline passed to
// SetDeadline, so tests can assert that a handshake deadline was applied.
type infoConn struct {
	net.Conn
	// deadline is the last value passed to SetDeadline.
	deadline time.Time
}
  7882  
// SetDeadline records t before delegating to the wrapped connection.
func (c *infoConn) SetDeadline(t time.Time) error {
	c.deadline = t
	return c.Conn.SetDeadline(t)
}
  7887  
// credentialsVerifyDeadline is a test TransportCredentials implementation
// that performs no real handshake; its ClientHandshake publishes the
// connection's recorded deadline on deadlineCh.
type credentialsVerifyDeadline struct {
	deadlineCh *testutils.Channel
}
  7891  
// ServerHandshake is a no-op handshake that returns the connection unchanged.
func (cvd *credentialsVerifyDeadline) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}
  7895  
// ClientHandshake publishes the deadline recorded on the *infoConn (set by
// the transport before handshaking) and otherwise performs no handshake.
//goland:noinspection GoUnusedParameter
func (cvd *credentialsVerifyDeadline) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	cvd.deadlineCh.Send(rawConn.(*infoConn).deadline)
	return rawConn, nil, nil
}
  7901  
// Info returns an empty ProtocolInfo; no transport security is negotiated.
func (cvd *credentialsVerifyDeadline) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}
// Clone returns the receiver itself; this test credential carries only a
// channel and is safe to share.
func (cvd *credentialsVerifyDeadline) Clone() credentials.TransportCredentials {
	return cvd
}
  7908  
// OverrideServerName is a no-op present only to satisfy the
// credentials.TransportCredentials interface.
//goland:noinspection GoUnusedParameter
func (cvd *credentialsVerifyDeadline) OverrideServerName(s string) error {
	return nil
}
  7913  
  7914  //goland:noinspection GoUnusedParameter
  7915  func unaryInterceptorVerifyConn(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
  7916  	conn := transport.GetConnection(ctx)
  7917  	if conn == nil {
  7918  		return nil, status.Error(codes.NotFound, "connection was not in context")
  7919  	}
  7920  	return nil, status.Error(codes.OK, "")
  7921  }
  7922  
// TestUnaryServerInterceptorGetsConnection tests whether the accepted conn on
// the server gets to any unary interceptors on the server side.
func (s) TestUnaryServerInterceptorGetsConnection(t *testing.T) {
	ss := &stubserver.StubServer{}
	if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyConn)}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// The interceptor returns codes.OK only when it found the connection in
	// the RPC context; codes.NotFound otherwise.
	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK)
	}
}
  7939  
  7940  //goland:noinspection GoUnusedParameter
  7941  func streamingInterceptorVerifyConn(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  7942  	conn := transport.GetConnection(ss.Context())
  7943  	if conn == nil {
  7944  		return status.Error(codes.NotFound, "connection was not in context")
  7945  	}
  7946  	return status.Error(codes.OK, "")
  7947  }
  7948  
// TestStreamingServerInterceptorGetsConnection tests whether the accepted conn on
// the server gets to any streaming interceptors on the server side.
func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) {
	ss := &stubserver.StubServer{}
	if err := ss.Start([]grpc.ServerOption{grpc.StreamInterceptor(streamingInterceptorVerifyConn)}); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	s, err := ss.Client.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
	if err != nil {
		t.Fatalf("ss.Client.StreamingOutputCall(_) = _, %v, want _, <nil>", err)
	}
	// The interceptor returns an OK status (no error surfaced on Recv beyond
	// the normal end-of-stream io.EOF) when the conn was present.
	if _, err := s.Recv(); err != io.EOF {
		t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF)
	}
}
  7969  
  7970  // unaryInterceptorVerifyAuthority verifies there is an unambiguous :authority
  7971  // once the request gets to an interceptor. An unambiguous :authority is defined
  7972  // as at most a single :authority header, and no host header according to A41.
  7973  //goland:noinspection GoUnusedParameter
  7974  func unaryInterceptorVerifyAuthority(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
  7975  	md, ok := metadata.FromIncomingContext(ctx)
  7976  	if !ok {
  7977  		return nil, status.Error(codes.NotFound, "metadata was not in context")
  7978  	}
  7979  	authority := md.Get(":authority")
  7980  	if len(authority) > 1 { // Should be an unambiguous authority by the time it gets to interceptor.
  7981  		return nil, status.Error(codes.NotFound, ":authority value had more than one value")
  7982  	}
  7983  	// Host header shouldn't be present by the time it gets to the interceptor
  7984  	// level (should either be renamed to :authority or explicitly deleted).
  7985  	host := md.Get("host")
  7986  	if len(host) != 0 {
  7987  		return nil, status.Error(codes.NotFound, "host header should not be present in metadata")
  7988  	}
  7989  	// Pass back the authority for verification on client - NotFound so
  7990  	// grpc-message will be available to read for verification.
  7991  	if len(authority) == 0 {
  7992  		// Represent no :authority header present with an empty string.
  7993  		return nil, status.Error(codes.NotFound, "")
  7994  	}
  7995  	return nil, status.Error(codes.NotFound, authority[0])
  7996  }
  7997  
// TestAuthorityHeader tests that the eventual :authority that reaches the grpc
// layer is unambiguous due to logic added in A41.
func (s) TestAuthorityHeader(t *testing.T) {
	tests := []struct {
		name          string
		headers       []string
		wantAuthority string
	}{
		// "If :authority is missing, Host must be renamed to :authority." - A41
		{
			name: "Missing :authority",
			// Codepath triggered by incoming headers with no authority but with
			// a host.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				"content-type", "application/grpc",
				"te", "trailers",
				"host", "localhost",
			},
			wantAuthority: "localhost",
		},
		{
			name: "Missing :authority and host",
			// Codepath triggered by incoming headers with no :authority and no
			// host.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				"content-type", "application/grpc",
				"te", "trailers",
			},
			wantAuthority: "",
		},
		// "If :authority is present, Host must be discarded." - A41
		{
			name: ":authority and host present",
			// Codepath triggered by incoming headers with both an authority
			// header and a host header.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				":authority", "localhost",
				"content-type", "application/grpc",
				"host", "localhost2",
			},
			wantAuthority: "localhost",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			te := newTest(t, tcpClearRREnv)
			ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
				return &testpb.SimpleResponse{}, nil
			}}
			// The interceptor echoes the observed :authority back via the
			// grpc-message of a NotFound status.
			te.unaryServerInt = unaryInterceptorVerifyAuthority
			te.startServer(ts)
			defer te.tearDown()
			success := testutils.NewChannel()
			te.withServerTester(func(st *serverTester) {
				st.writeHeaders(http2.HeadersFrameParam{
					StreamID:      1,
					BlockFragment: st.encodeHeader(test.headers...),
					EndStream:     false,
					EndHeaders:    true,
				})
				// Five-byte empty gRPC message frame; END_STREAM completes
				// the request.
				st.writeData(1, true, []byte{0, 0, 0, 0, 0})

				// Read frames until the trailers arrive and capture the
				// grpc-message carrying the echoed authority.
				for {
					frame := st.wantAnyFrame()
					f, ok := frame.(*http2.MetaHeadersFrame)
					if !ok {
						continue
					}
					for _, header := range f.Fields {
						if header.Name == "grpc-message" {
							success.Send(header.Value)
							return
						}
					}
				}
			})

			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			gotAuthority, err := success.Receive(ctx)
			if err != nil {
				t.Fatalf("Error receiving from channel: %v", err)
			}
			if gotAuthority != test.wantAuthority {
				t.Fatalf("gotAuthority: %v, wantAuthority %v", gotAuthority, test.wantAuthority)
			}
		})
	}
}
  8093  
// wrapCloseListener tracks Accepts/Closes and maintains a counter of the
// number of open connections.
type wrapCloseListener struct {
	net.Listener
	connsOpen int32 // accessed atomically; incremented in Accept, decremented in wrapCloseConn.Close
}
  8100  
// wrapCloseConn is returned by wrapCloseListener.Accept and decrements its
// listener's connsOpen when Close is called.
type wrapCloseConn struct {
	net.Conn
	lis       *wrapCloseListener // listener whose counter this conn decrements on Close
	closeOnce sync.Once          // guards the decrement so repeated Close calls count once
}
  8108  
  8109  func (w *wrapCloseListener) Accept() (net.Conn, error) {
  8110  	conn, err := w.Listener.Accept()
  8111  	if err != nil {
  8112  		return nil, err
  8113  	}
  8114  	atomic.AddInt32(&w.connsOpen, 1)
  8115  	return &wrapCloseConn{Conn: conn, lis: w}, nil
  8116  }
  8117  
  8118  func (w *wrapCloseConn) Close() error {
  8119  	defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) })
  8120  	return w.Conn.Close()
  8121  }
  8122  
  8123  // TestServerClosesConn ensures conn.Close is always closed even if the client
  8124  // doesn't complete the HTTP/2 handshake.
  8125  func (s) TestServerClosesConn(t *testing.T) {
  8126  	lis := bufconn.Listen(20)
  8127  	wrapLis := &wrapCloseListener{Listener: lis}
  8128  
  8129  	s := grpc.NewServer()
  8130  	go func() {
  8131  		_ = s.Serve(wrapLis)
  8132  	}()
  8133  	defer s.Stop()
  8134  
  8135  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  8136  	defer cancel()
  8137  
  8138  	for i := 0; i < 10; i++ {
  8139  		conn, err := lis.DialContext(ctx)
  8140  		if err != nil {
  8141  			t.Fatalf("Dial = _, %v; want _, nil", err)
  8142  		}
  8143  		_ = conn.Close()
  8144  	}
  8145  	for ctx.Err() == nil {
  8146  		if atomic.LoadInt32(&wrapLis.connsOpen) == 0 {
  8147  			return
  8148  		}
  8149  		time.Sleep(50 * time.Millisecond)
  8150  	}
  8151  	t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen))
  8152  }