github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/test/end2end_test.go

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package test
    20  
    21  import (
    22  	"bufio"
    23  	"bytes"
    24  	"compress/gzip"
    25  	"context"
    26  	"errors"
    27  	"flag"
    28  	"fmt"
    29  	"io"
    30  	"math"
    31  	"net"
    32  	"os"
    33  	"reflect"
    34  	"runtime"
    35  	"strings"
    36  	"sync"
    37  	"sync/atomic"
    38  	"syscall"
    39  	"testing"
    40  	"time"
    41  
    42  	http "github.com/hxx258456/ccgo/gmhttp"
    43  
    44  	tls "github.com/hxx258456/ccgo/gmtls"
    45  
    46  	"github.com/golang/protobuf/proto"
    47  	anypb "github.com/golang/protobuf/ptypes/any"
    48  	grpc "github.com/hxx258456/ccgo/grpc"
    49  	"github.com/hxx258456/ccgo/grpc/codes"
    50  	"github.com/hxx258456/ccgo/grpc/connectivity"
    51  	"github.com/hxx258456/ccgo/grpc/credentials"
    52  	"github.com/hxx258456/ccgo/grpc/encoding"
    53  	_ "github.com/hxx258456/ccgo/grpc/encoding/gzip"
    54  	"github.com/hxx258456/ccgo/grpc/health"
    55  	healthgrpc "github.com/hxx258456/ccgo/grpc/health/grpc_health_v1"
    56  	healthpb "github.com/hxx258456/ccgo/grpc/health/grpc_health_v1"
    57  	"github.com/hxx258456/ccgo/grpc/internal"
    58  	"github.com/hxx258456/ccgo/grpc/internal/channelz"
    59  	"github.com/hxx258456/ccgo/grpc/internal/grpcsync"
    60  	"github.com/hxx258456/ccgo/grpc/internal/grpctest"
    61  	"github.com/hxx258456/ccgo/grpc/internal/stubserver"
    62  	"github.com/hxx258456/ccgo/grpc/internal/testutils"
    63  	"github.com/hxx258456/ccgo/grpc/internal/transport"
    64  	"github.com/hxx258456/ccgo/grpc/keepalive"
    65  	"github.com/hxx258456/ccgo/grpc/metadata"
    66  	"github.com/hxx258456/ccgo/grpc/peer"
    67  	"github.com/hxx258456/ccgo/grpc/resolver"
    68  	"github.com/hxx258456/ccgo/grpc/resolver/manual"
    69  	"github.com/hxx258456/ccgo/grpc/serviceconfig"
    70  	"github.com/hxx258456/ccgo/grpc/stats"
    71  	"github.com/hxx258456/ccgo/grpc/status"
    72  	"github.com/hxx258456/ccgo/grpc/tap"
    73  	"github.com/hxx258456/ccgo/grpc/test/bufconn"
    74  	testpb "github.com/hxx258456/ccgo/grpc/test/grpc_testing"
    75  	"github.com/hxx258456/ccgo/grpc/testdata"
    76  	"github.com/hxx258456/ccgo/net/http2"
    77  	"github.com/hxx258456/ccgo/net/http2/hpack"
    78  	spb "google.golang.org/genproto/googleapis/rpc/status"
    79  )
    80  
    81  const defaultHealthService = "grpc.health.v1.Health"
    82  
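        // channelz is turned on for the entire test binary so that channelz data
        // is collected for all tests.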
    83  func init() {
    84  	channelz.TurnOn()
    85  }
    86  
    87  type s struct {
    88  	grpctest.Tester
    89  }
    90  
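        // Test runs all methods on (s) with the Test prefix as subtests via the
        // grpctest framework.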
    91  func Test(t *testing.T) {
    92  	grpctest.RunSubTests(t, s{})
    93  }
    94  
    95  var (
    96  	// For headers:
    97  	testMetadata = metadata.MD{
    98  		"key1":     []string{"value1"},
    99  		"key2":     []string{"value2"},
   100  		"key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})},
   101  	}
   102  	testMetadata2 = metadata.MD{
   103  		"key1": []string{"value12"},
   104  		"key2": []string{"value22"},
   105  	}
   106  	// For trailers:
   107  	testTrailerMetadata = metadata.MD{
   108  		"tkey1":     []string{"trailerValue1"},
   109  		"tkey2":     []string{"trailerValue2"},
   110  		"tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})},
   111  	}
   112  	testTrailerMetadata2 = metadata.MD{
   113  		"tkey1": []string{"trailerValue12"},
   114  		"tkey2": []string{"trailerValue22"},
   115  	}
   116  	// A capitalized "Key" is illegal in HTTP/2 metadata.
   117  	malformedHTTP2Metadata = metadata.MD{
   118  		"Key": []string{"foo"},
   119  	}
   120  	testAppUA     = "myApp1/1.0 myApp2/0.9"
   121  	failAppUA     = "fail-this-RPC"
   122  	detailedError = status.ErrorProto(&spb.Status{
   123  		Code:    int32(codes.DataLoss),
   124  		Message: "error for testing: " + failAppUA,
   125  		Details: []*anypb.Any{{
   126  			TypeUrl: "url",
   127  			Value:   []byte{6, 0, 0, 6, 1, 3},
   128  		}},
   129  	})
   130  )
   131  
   132  var raceMode bool // set by race.go in race mode
   133  
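        // testServer implements TestServiceServer, with knobs for exercising
        // header, trailer, and error-handling behavior.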
   134  type testServer struct {
   135  	testpb.UnimplementedTestServiceServer
   136  
   137  	security           string // indicates the authentication protocol used by this server.
   138  	earlyFail          bool   // whether to error out the execution of a service handler prematurely.
   139  	setAndSendHeader   bool   // whether to call setHeader and sendHeader.
   140  	setHeaderOnly      bool   // whether to only call setHeader, not sendHeader.
   141  	multipleSetTrailer bool   // whether to call setTrailer multiple times.
   142  	unaryCallSleepTime time.Duration
   143  }
   144  
   145  func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
   146  	if md, ok := metadata.FromIncomingContext(ctx); ok {
   147  		// For testing purposes, return an error if the user-agent is failAppUA,
   148  		// so the test can verify that the client receives the correct error.
   149  		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
   150  			return nil, detailedError
   151  		}
   152  		var str []string
   153  		for _, entry := range md["user-agent"] {
   154  			str = append(str, "ua", entry)
   155  		}
   156  		grpc.SendHeader(ctx, metadata.Pairs(str...))
   157  	}
   158  	return new(testpb.Empty), nil
   159  }
   160  
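        // newPayload builds a test payload of the given type and size; only
        // COMPRESSABLE payloads are supported.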
   161  func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
   162  	if size < 0 {
   163  		return nil, fmt.Errorf("requested a response with invalid length %d", size)
   164  	}
   165  	body := make([]byte, size)
   166  	switch t {
   167  	case testpb.PayloadType_COMPRESSABLE:
   168  	case testpb.PayloadType_UNCOMPRESSABLE:
   169  		return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported")
   170  	default:
   171  		return nil, fmt.Errorf("unsupported payload type: %d", t)
   172  	}
   173  	return &testpb.Payload{
   174  		Type: t,
   175  		Body: body,
   176  	}, nil
   177  }
   178  
   179  func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
   180  	md, ok := metadata.FromIncomingContext(ctx)
   181  	if ok {
   182  		if _, exists := md[":authority"]; !exists {
   183  			return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
   184  		}
   185  		if s.setAndSendHeader {
   186  			if err := grpc.SetHeader(ctx, md); err != nil {
   187  				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
   188  			}
   189  			if err := grpc.SendHeader(ctx, testMetadata2); err != nil {
   190  				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", testMetadata2, err)
   191  			}
   192  		} else if s.setHeaderOnly {
   193  			if err := grpc.SetHeader(ctx, md); err != nil {
   194  				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
   195  			}
   196  			if err := grpc.SetHeader(ctx, testMetadata2); err != nil {
   197  				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", testMetadata2, err)
   198  			}
   199  		} else {
   200  			if err := grpc.SendHeader(ctx, md); err != nil {
   201  				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
   202  			}
   203  		}
   204  		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
   205  			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
   206  		}
   207  		if s.multipleSetTrailer {
   208  			if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil {
   209  				return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata2, err)
   210  			}
   211  		}
   212  	}
   213  	pr, ok := peer.FromContext(ctx)
   214  	if !ok {
   215  		return nil, status.Error(codes.DataLoss, "failed to get peer from ctx")
   216  	}
   217  	if pr.Addr == net.Addr(nil) {
   218  		return nil, status.Error(codes.DataLoss, "failed to get peer address")
   219  	}
   220  	if s.security != "" {
   221  		// Check Auth info
   222  		var authType, serverName string
   223  		switch info := pr.AuthInfo.(type) {
   224  		case credentials.TLSInfo:
   225  			authType = info.AuthType()
   226  			serverName = info.State.ServerName
   227  		default:
   228  			return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type")
   229  		}
   230  		if authType != s.security {
   231  			return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security)
   232  		}
   233  		if serverName != "x.test.example.com" {
   234  			return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName)
   235  		}
   236  	}
   237  	// Simulate some service delay.
   238  	time.Sleep(s.unaryCallSleepTime)
   239  
   240  	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
   241  	if err != nil {
   242  		return nil, err
   243  	}
   244  
   245  	return &testpb.SimpleResponse{
   246  		Payload: payload,
   247  	}, nil
   248  }
   249  
   250  func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
   251  	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
   252  		if _, exists := md[":authority"]; !exists {
   253  			return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
   254  		}
   255  		// For testing purposes, return an error if the user-agent is failAppUA,
   256  		// so the test can verify that the client receives the correct error.
   257  		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
   258  			return status.Error(codes.DataLoss, "error for testing: "+failAppUA)
   259  		}
   260  	}
   261  	cs := args.GetResponseParameters()
   262  	for _, c := range cs {
   263  		if us := c.GetIntervalUs(); us > 0 {
   264  			time.Sleep(time.Duration(us) * time.Microsecond)
   265  		}
   266  
   267  		payload, err := newPayload(args.GetResponseType(), c.GetSize())
   268  		if err != nil {
   269  			return err
   270  		}
   271  
   272  		if err := stream.Send(&testpb.StreamingOutputCallResponse{
   273  			Payload: payload,
   274  		}); err != nil {
   275  			return err
   276  		}
   277  	}
   278  	return nil
   279  }
   280  
   281  func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
   282  	var sum int
   283  	for {
   284  		in, err := stream.Recv()
   285  		if err == io.EOF {
   286  			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
   287  				AggregatedPayloadSize: int32(sum),
   288  			})
   289  		}
   290  		if err != nil {
   291  			return err
   292  		}
   293  		p := in.GetPayload().GetBody()
   294  		sum += len(p)
   295  		if s.earlyFail {
   296  			return status.Error(codes.NotFound, "not found")
   297  		}
   298  	}
   299  }
   300  
   301  func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
   302  	md, ok := metadata.FromIncomingContext(stream.Context())
   303  	if ok {
   304  		if s.setAndSendHeader {
   305  			if err := stream.SetHeader(md); err != nil {
   306  				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
   307  			}
   308  			if err := stream.SendHeader(testMetadata2); err != nil {
   309  				return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
   310  			}
   311  		} else if s.setHeaderOnly {
   312  			if err := stream.SetHeader(md); err != nil {
   313  				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
   314  			}
   315  			if err := stream.SetHeader(testMetadata2); err != nil {
   316  				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
   317  			}
   318  		} else {
   319  			if err := stream.SendHeader(md); err != nil {
   320  				return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
   321  			}
   322  		}
   323  		stream.SetTrailer(testTrailerMetadata)
   324  		if s.multipleSetTrailer {
   325  			stream.SetTrailer(testTrailerMetadata2)
   326  		}
   327  	}
   328  	for {
   329  		in, err := stream.Recv()
   330  		if err == io.EOF {
   331  			// read done.
   332  			return nil
   333  		}
   334  		if err != nil {
   335  			// to facilitate testSvrWriteStatusEarlyWrite
   336  			if status.Code(err) == codes.ResourceExhausted {
   337  				return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
   338  			}
   339  			return err
   340  		}
   341  		cs := in.GetResponseParameters()
   342  		for _, c := range cs {
   343  			if us := c.GetIntervalUs(); us > 0 {
   344  				time.Sleep(time.Duration(us) * time.Microsecond)
   345  			}
   346  
   347  			payload, err := newPayload(in.GetResponseType(), c.GetSize())
   348  			if err != nil {
   349  				return err
   350  			}
   351  
   352  			if err := stream.Send(&testpb.StreamingOutputCallResponse{
   353  				Payload: payload,
   354  			}); err != nil {
   355  				// to facilitate testSvrWriteStatusEarlyWrite
   356  				if status.Code(err) == codes.ResourceExhausted {
   357  					return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
   358  				}
   359  				return err
   360  			}
   361  		}
   362  	}
   363  }
   364  
   365  func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error {
   366  	var msgBuf []*testpb.StreamingOutputCallRequest
   367  	for {
   368  		in, err := stream.Recv()
   369  		if err == io.EOF {
   370  			// read done.
   371  			break
   372  		}
   373  		if err != nil {
   374  			return err
   375  		}
   376  		msgBuf = append(msgBuf, in)
   377  	}
   378  	for _, m := range msgBuf {
   379  		cs := m.GetResponseParameters()
   380  		for _, c := range cs {
   381  			if us := c.GetIntervalUs(); us > 0 {
   382  				time.Sleep(time.Duration(us) * time.Microsecond)
   383  			}
   384  
   385  			payload, err := newPayload(m.GetResponseType(), c.GetSize())
   386  			if err != nil {
   387  				return err
   388  			}
   389  
   390  			if err := stream.Send(&testpb.StreamingOutputCallResponse{
   391  				Payload: payload,
   392  			}); err != nil {
   393  				return err
   394  			}
   395  		}
   396  	}
   397  	return nil
   398  }
   399  
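        // env describes a test environment: the network and security protocol to
        // use, whether to serve through the http.Handler-based transport, and
        // which balancer to configure.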
   400  type env struct {
   401  	name         string
   402  	network      string // The type of network such as tcp, unix, etc.
   403  	security     string // The security protocol, such as TLS.
   404  	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
   405  	balancer     string // One of "round_robin", "pick_first", or "".
   406  	customDialer func(string, string, time.Duration) (net.Conn, error)
   407  }
   408  
   409  func (e env) runnable() bool {
   410  	if runtime.GOOS == "windows" && e.network == "unix" {
   411  		return false
   412  	}
   413  	return true
   414  }
   415  
   416  func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
   417  	if e.customDialer != nil {
   418  		return e.customDialer(e.network, addr, timeout)
   419  	}
   420  	return net.DialTimeout(e.network, addr, timeout)
   421  }
   422  
   423  var (
   424  	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp"}
   425  	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls"}
   426  	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
   427  	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
   428  	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
   429  	noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"}
   430  	allEnv        = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv}
   431  )
   432  
   433  var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear-v1-balancer', 'tcp-tls-v1-balancer', 'tcp-clear', 'tcp-tls', 'handler-tls', or 'no-balancer' to run only the tests for that environment. Empty means all.")
   434  
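        // listTestEnv returns the environments to run the tests in, honoring the
        // -only_env flag when it is set.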
   435  func listTestEnv() (envs []env) {
   436  	if *onlyEnv != "" {
   437  		for _, e := range allEnv {
   438  			if e.name == *onlyEnv {
   439  				if !e.runnable() {
   440  					panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS))
   441  				}
   442  				return []env{e}
   443  			}
   444  		}
   445  		panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv))
   446  	}
   447  	for _, e := range allEnv {
   448  		if e.runnable() {
   449  			envs = append(envs, e)
   450  		}
   451  	}
   452  	return envs
   453  }
   454  
   455  // test is an end-to-end test. It should be created with the newTest
   456  // func, modified as needed, and then started with its startServer method.
   457  // It should be cleaned up with the tearDown method.
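        //
        // A minimal sketch of the common pattern used throughout this file:
        //
        //	te := newTest(t, e) // e is one of the envs from listTestEnv()
        //	te.startServer(&testServer{security: e.security})
        //	defer te.tearDown()
        //	cc := te.clientConn()
        //	tc := testpb.NewTestServiceClient(cc)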
   458  type test struct {
   459  	// The following are set up in newTest().
   460  	t      *testing.T
   461  	e      env
   462  	ctx    context.Context // valid for life of test, before tearDown
   463  	cancel context.CancelFunc
   464  
   465  	// The following knobs are for the server-side, and should be set after
   466  	// calling newTest() and before calling startServer().
   467  
   468  	// whether or not to expose the server's health via the default health
   469  	// service implementation.
   470  	enableHealthServer bool
   471  	// In almost all cases, one should set the 'enableHealthServer' flag above to
   472  	// expose the server's health using the default health service
   473  	// implementation. This field should be set only when a non-default
   474  	// health service implementation is required.
   475  	healthServer            healthpb.HealthServer
   476  	maxStream               uint32
   477  	tapHandle               tap.ServerInHandle
   478  	maxServerMsgSize        *int
   479  	maxServerReceiveMsgSize *int
   480  	maxServerSendMsgSize    *int
   481  	maxServerHeaderListSize *uint32
   482  	// Used to test the deprecated APIs RPCCompressor and RPCDecompressor.
   483  	serverCompression           bool
   484  	unknownHandler              grpc.StreamHandler
   485  	unaryServerInt              grpc.UnaryServerInterceptor
   486  	streamServerInt             grpc.StreamServerInterceptor
   487  	serverInitialWindowSize     int32
   488  	serverInitialConnWindowSize int32
   489  	customServerOptions         []grpc.ServerOption
   490  
   491  	// The following knobs are for the client-side, and should be set after
   492  	// calling newTest() and before calling clientConn().
   493  	maxClientMsgSize        *int
   494  	maxClientReceiveMsgSize *int
   495  	maxClientSendMsgSize    *int
   496  	maxClientHeaderListSize *uint32
   497  	userAgent               string
   498  	// Used to test the deprecated APIs WithCompressor and WithDecompressor.
   499  	clientCompression bool
   500  	// Used to test the new compressor registration API UseCompressor.
   501  	clientUseCompression bool
   502  	// clientNopCompression is set to create a compressor whose type is not supported.
   503  	clientNopCompression        bool
   504  	unaryClientInt              grpc.UnaryClientInterceptor
   505  	streamClientInt             grpc.StreamClientInterceptor
   506  	sc                          <-chan grpc.ServiceConfig
   507  	customCodec                 encoding.Codec
   508  	clientInitialWindowSize     int32
   509  	clientInitialConnWindowSize int32
   510  	perRPCCreds                 credentials.PerRPCCredentials
   511  	customDialOptions           []grpc.DialOption
   512  	resolverScheme              string
   513  
   514  	// These are set once startServer is called. The common case is to have
   515  	// only one testServer.
   516  	srv     stopper
   517  	hSrv    healthpb.HealthServer
   518  	srvAddr string
   519  
   520  	// These are set once startServers is called.
   521  	srvs     []stopper
   522  	hSrvs    []healthpb.HealthServer
   523  	srvAddrs []string
   524  
   525  	cc          *grpc.ClientConn // nil until requested via clientConn
   526  	restoreLogs func()           // nil unless declareLogNoise is used
   527  }
   528  
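        // stopper is the subset of *grpc.Server (and wrapHS) that tests use to
        // shut a server down.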
   529  type stopper interface {
   530  	Stop()
   531  	GracefulStop()
   532  }
   533  
   534  func (te *test) tearDown() {
   535  	if te.cancel != nil {
   536  		te.cancel()
   537  		te.cancel = nil
   538  	}
   539  
   540  	if te.cc != nil {
   541  		te.cc.Close()
   542  		te.cc = nil
   543  	}
   544  
   545  	if te.restoreLogs != nil {
   546  		te.restoreLogs()
   547  		te.restoreLogs = nil
   548  	}
   549  
   550  	if te.srv != nil {
   551  		te.srv.Stop()
   552  	}
   553  	for _, s := range te.srvs {
   554  		s.Stop()
   555  	}
   556  }
   557  
   558  // newTest returns a new test using the provided testing.T and
   559  // environment.  It is returned with default values. Tests should
   560  // modify it before calling its startServer and clientConn methods.
   561  func newTest(t *testing.T, e env) *test {
   562  	te := &test{
   563  		t:         t,
   564  		e:         e,
   565  		maxStream: math.MaxUint32,
   566  	}
   567  	te.ctx, te.cancel = context.WithCancel(context.Background())
   568  	return te
   569  }
   570  
   571  func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
   572  	te.t.Helper()
   573  	te.t.Logf("Running test in %s environment...", te.e.name)
   574  	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
   575  	if te.maxServerMsgSize != nil {
   576  		sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize))
   577  	}
   578  	if te.maxServerReceiveMsgSize != nil {
   579  		sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize))
   580  	}
   581  	if te.maxServerSendMsgSize != nil {
   582  		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
   583  	}
   584  	if te.maxServerHeaderListSize != nil {
   585  		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
   586  	}
   587  	if te.tapHandle != nil {
   588  		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
   589  	}
   590  	if te.serverCompression {
   591  		sopts = append(sopts,
   592  			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
   593  			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
   594  		)
   595  	}
   596  	if te.unaryServerInt != nil {
   597  		sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt))
   598  	}
   599  	if te.streamServerInt != nil {
   600  		sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt))
   601  	}
   602  	if te.unknownHandler != nil {
   603  		sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler))
   604  	}
   605  	if te.serverInitialWindowSize > 0 {
   606  		sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize))
   607  	}
   608  	if te.serverInitialConnWindowSize > 0 {
   609  		sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize))
   610  	}
   611  	la := "localhost:0"
   612  	switch te.e.network {
   613  	case "unix":
   614  		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
   615  		syscall.Unlink(la)
   616  	}
   617  	lis, err := listen(te.e.network, la)
   618  	if err != nil {
   619  		te.t.Fatalf("Failed to listen: %v", err)
   620  	}
   621  	if te.e.security == "tls" {
   622  		creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
   623  		if err != nil {
   624  			te.t.Fatalf("Failed to generate credentials %v", err)
   625  		}
   626  		sopts = append(sopts, grpc.Creds(creds))
   627  	}
   628  	sopts = append(sopts, te.customServerOptions...)
   629  	s := grpc.NewServer(sopts...)
   630  	if ts != nil {
   631  		testpb.RegisterTestServiceServer(s, ts)
   632  	}
   633  
   634  	// Create a new default health server if enableHealthServer is set, or use
   635  	// the provided one.
   636  	hs := te.healthServer
   637  	if te.enableHealthServer {
   638  		hs = health.NewServer()
   639  	}
   640  	if hs != nil {
   641  		healthgrpc.RegisterHealthServer(s, hs)
   642  	}
   643  
   644  	addr := la
   645  	switch te.e.network {
   646  	case "unix":
   647  	default:
   648  		_, port, err := net.SplitHostPort(lis.Addr().String())
   649  		if err != nil {
   650  			te.t.Fatalf("Failed to parse listener address: %v", err)
   651  		}
   652  		addr = "localhost:" + port
   653  	}
   654  
   655  	te.srv = s
   656  	te.hSrv = hs
   657  	te.srvAddr = addr
   658  
   659  	if te.e.httpHandler {
   660  		if te.e.security != "tls" {
   661  			te.t.Fatalf("unsupported environment settings")
   662  		}
   663  		cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
   664  		if err != nil {
   665  			te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err)
   666  		}
   667  		hs := &http.Server{
   668  			Handler:   s,
   669  			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
   670  		}
   671  		if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil {
   672  			te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err)
   673  		}
   674  		te.srv = wrapHS{hs}
   675  		tlsListener := tls.NewListener(lis, hs.TLSConfig)
   676  		go hs.Serve(tlsListener)
   677  		return lis
   678  	}
   679  
   680  	go s.Serve(lis)
   681  	return lis
   682  }
   683  
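        // wrapHS adapts an *http.Server to the stopper interface.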
   684  type wrapHS struct {
   685  	s *http.Server
   686  }
   687  
   688  func (w wrapHS) GracefulStop() {
   689  	w.s.Shutdown(context.Background())
   690  }
   691  
   692  func (w wrapHS) Stop() {
   693  	w.s.Close()
   694  }
   695  
   696  func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper {
   697  	l := te.listenAndServe(ts, listenWithConnControl)
   698  	return l.(*listenerWrapper)
   699  }
   700  
   701  // startServer starts a gRPC server exposing the provided TestService
   702  // implementation. Callers should defer a call to te.tearDown to clean up.
   703  func (te *test) startServer(ts testpb.TestServiceServer) {
   704  	te.t.Helper()
   705  	te.listenAndServe(ts, net.Listen)
   706  }
   707  
   708  // startServers starts 'num' gRPC servers exposing the provided TestService.
   709  func (te *test) startServers(ts testpb.TestServiceServer, num int) {
   710  	for i := 0; i < num; i++ {
   711  		te.startServer(ts)
   712  		te.srvs = append(te.srvs, te.srv.(*grpc.Server))
   713  		te.hSrvs = append(te.hSrvs, te.hSrv)
   714  		te.srvAddrs = append(te.srvAddrs, te.srvAddr)
   715  		te.srv = nil
   716  		te.hSrv = nil
   717  		te.srvAddr = ""
   718  	}
   719  }
   720  
   721  // setHealthServingStatus is a helper function to set the health status.
   722  func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
   723  	hs, ok := te.hSrv.(*health.Server)
   724  	if !ok {
   725  		panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs))
   726  	}
   727  	hs.SetServingStatus(service, status)
   728  }
   729  
   730  type nopCompressor struct {
   731  	grpc.Compressor
   732  }
   733  
   734  // NewNopCompressor creates a compressor to test the case where the compressor type is not supported.
   735  func NewNopCompressor() grpc.Compressor {
   736  	return &nopCompressor{grpc.NewGZIPCompressor()}
   737  }
   738  
   739  func (c *nopCompressor) Type() string {
   740  	return "nop"
   741  }
   742  
   743  type nopDecompressor struct {
   744  	grpc.Decompressor
   745  }
   746  
   747  // NewNopDecompressor creates a decompressor to test the case where the decompressor type is not supported.
   748  func NewNopDecompressor() grpc.Decompressor {
   749  	return &nopDecompressor{grpc.NewGZIPDecompressor()}
   750  }
   751  
   752  func (d *nopDecompressor) Type() string {
   753  	return "nop"
   754  }
   755  
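        // configDial converts the test's client-side knobs into grpc.DialOptions
        // and returns them together with the resolver scheme prefix to prepend to
        // the target address.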
   756  func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
   757  	opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent))
   758  
   759  	if te.sc != nil {
   760  		opts = append(opts, grpc.WithServiceConfig(te.sc))
   761  	}
   762  
   763  	if te.clientCompression {
   764  		opts = append(opts,
   765  			grpc.WithCompressor(grpc.NewGZIPCompressor()),
   766  			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
   767  		)
   768  	}
   769  	if te.clientUseCompression {
   770  		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
   771  	}
   772  	if te.clientNopCompression {
   773  		opts = append(opts,
   774  			grpc.WithCompressor(NewNopCompressor()),
   775  			grpc.WithDecompressor(NewNopDecompressor()),
   776  		)
   777  	}
   778  	if te.unaryClientInt != nil {
   779  		opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt))
   780  	}
   781  	if te.streamClientInt != nil {
   782  		opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt))
   783  	}
   784  	if te.maxClientMsgSize != nil {
   785  		opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize))
   786  	}
   787  	if te.maxClientReceiveMsgSize != nil {
   788  		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize)))
   789  	}
   790  	if te.maxClientSendMsgSize != nil {
   791  		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
   792  	}
   793  	if te.maxClientHeaderListSize != nil {
   794  		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
   795  	}
   796  	switch te.e.security {
   797  	case "tls":
   798  		creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com")
   799  		if err != nil {
   800  			te.t.Fatalf("Failed to load credentials: %v", err)
   801  		}
   802  		opts = append(opts, grpc.WithTransportCredentials(creds))
   803  	case "empty":
   804  		// Don't add any transport creds option.
   805  	default:
   806  		opts = append(opts, grpc.WithInsecure())
   807  	}
   808  	// TODO(bar) switch balancer case "pick_first".
   809  	var scheme string
   810  	if te.resolverScheme == "" {
   811  		scheme = "passthrough:///"
   812  	} else {
   813  		scheme = te.resolverScheme + ":///"
   814  	}
   815  	if te.e.balancer != "" {
   816  		opts = append(opts, grpc.WithBalancerName(te.e.balancer))
   817  	}
   818  	if te.clientInitialWindowSize > 0 {
   819  		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
   820  	}
   821  	if te.clientInitialConnWindowSize > 0 {
   822  		opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize))
   823  	}
   824  	if te.perRPCCreds != nil {
   825  		opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds))
   826  	}
   827  	if te.customCodec != nil {
   828  		opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec)))
   829  	}
   830  	if te.srvAddr == "" {
   831  		te.srvAddr = "client.side.only.test"
   832  	}
   833  	opts = append(opts, te.customDialOptions...)
   834  	return opts, scheme
   835  }
   836  
   837  func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) {
   838  	if te.cc != nil {
   839  		return te.cc, nil
   840  	}
   841  	opts, scheme := te.configDial()
   842  	dw := &dialerWrapper{}
   843  	// Overwrite the dialer set by configDial with the wrapper's dialer.
   844  	opts = append(opts, grpc.WithDialer(dw.dialer))
   845  	var err error
   846  	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
   847  	if err != nil {
   848  		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
   849  	}
   850  	return te.cc, dw
   851  }
   852  
   853  func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn {
   854  	if te.cc != nil {
   855  		return te.cc
   856  	}
   857  	var scheme string
   858  	opts, scheme = te.configDial(opts...)
   859  	var err error
   860  	te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...)
   861  	if err != nil {
   862  		te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err)
   863  	}
   864  	return te.cc
   865  }
   866  
   867  func (te *test) declareLogNoise(phrases ...string) {
   868  	te.restoreLogs = declareLogNoise(te.t, phrases...)
   869  }
   870  
   871  func (te *test) withServerTester(fn func(st *serverTester)) {
   872  	c, err := te.e.dialer(te.srvAddr, 10*time.Second)
   873  	if err != nil {
   874  		te.t.Fatal(err)
   875  	}
   876  	defer c.Close()
   877  	if te.e.security == "tls" {
   878  		c = tls.Client(c, &tls.Config{
   879  			InsecureSkipVerify: true,
   880  			NextProtos:         []string{http2.NextProtoTLS},
   881  		})
   882  	}
   883  	st := newServerTesterFromConn(te.t, c)
   884  	st.greet()
   885  	fn(st)
   886  }
   887  
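        // lazyConn wraps a net.Conn and, once beLazy is set, delays each Write by
        // a second to simulate a slow connection.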
   888  type lazyConn struct {
   889  	net.Conn
   890  	beLazy int32
   891  }
   892  
   893  func (l *lazyConn) Write(b []byte) (int, error) {
   894  	if atomic.LoadInt32(&(l.beLazy)) == 1 {
   895  		time.Sleep(time.Second)
   896  	}
   897  	return l.Conn.Write(b)
   898  }
   899  
   900  func (s) TestContextDeadlineNotIgnored(t *testing.T) {
   901  	e := noBalancerEnv
   902  	var lc *lazyConn
   903  	e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) {
   904  		conn, err := net.DialTimeout(network, addr, timeout)
   905  		if err != nil {
   906  			return nil, err
   907  		}
   908  		lc = &lazyConn{Conn: conn}
   909  		return lc, nil
   910  	}
   911  
   912  	te := newTest(t, e)
   913  	te.startServer(&testServer{security: e.security})
   914  	defer te.tearDown()
   915  
   916  	cc := te.clientConn()
   917  	tc := testpb.NewTestServiceClient(cc)
   918  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   919  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
   920  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
   921  	}
   922  	cancel()
   923  	atomic.StoreInt32(&(lc.beLazy), 1)
   924  	ctx, cancel = context.WithTimeout(context.Background(), 50*time.Millisecond)
   925  	defer cancel()
   926  	t1 := time.Now()
   927  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
   928  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err)
   929  	}
   930  	if time.Since(t1) > 2*time.Second {
   931  		t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline")
   932  	}
   933  }
   934  
   935  func (s) TestTimeoutOnDeadServer(t *testing.T) {
   936  	for _, e := range listTestEnv() {
   937  		testTimeoutOnDeadServer(t, e)
   938  	}
   939  }
   940  
   941  func testTimeoutOnDeadServer(t *testing.T, e env) {
   942  	te := newTest(t, e)
   943  	te.userAgent = testAppUA
   944  	te.declareLogNoise(
   945  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
   946  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
   947  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
   948  	)
   949  	te.startServer(&testServer{security: e.security})
   950  	defer te.tearDown()
   951  
   952  	cc := te.clientConn()
   953  	tc := testpb.NewTestServiceClient(cc)
   954  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   955  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
   956  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
   957  	}
   958  	te.srv.Stop()
   959  	cancel()
   960  
   961  	// Wait for the client to notice the connection is gone.
   962  	ctx, cancel = context.WithTimeout(context.Background(), 500*time.Millisecond)
   963  	state := cc.GetState()
   964  	for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
   965  	}
   966  	cancel()
   967  	if state == connectivity.Ready {
   968  		t.Fatalf("Timed out waiting for non-ready state")
   969  	}
   970  	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond)
   971  	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
   972  	cancel()
   973  	if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded {
   974  		// If e.balancer == "", the addrConn will stop reconnecting because the dialer
   975  		// returns a non-temporary error, so the error will be an internal error.
   976  		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
   977  	}
   978  	awaitNewConnLogOutput()
   979  }
   980  
   981  func (s) TestServerGracefulStopIdempotent(t *testing.T) {
   982  	for _, e := range listTestEnv() {
   983  		if e.name == "handler-tls" {
   984  			continue
   985  		}
   986  		testServerGracefulStopIdempotent(t, e)
   987  	}
   988  }
   989  
   990  func testServerGracefulStopIdempotent(t *testing.T, e env) {
   991  	te := newTest(t, e)
   992  	te.userAgent = testAppUA
   993  	te.startServer(&testServer{security: e.security})
   994  	defer te.tearDown()
   995  
   996  	for i := 0; i < 3; i++ {
   997  		te.srv.GracefulStop()
   998  	}
   999  }
  1000  
  1001  func (s) TestServerGoAway(t *testing.T) {
  1002  	for _, e := range listTestEnv() {
  1003  		if e.name == "handler-tls" {
  1004  			continue
  1005  		}
  1006  		testServerGoAway(t, e)
  1007  	}
  1008  }
  1009  
  1010  func testServerGoAway(t *testing.T, e env) {
  1011  	te := newTest(t, e)
  1012  	te.userAgent = testAppUA
  1013  	te.startServer(&testServer{security: e.security})
  1014  	defer te.tearDown()
  1015  
  1016  	cc := te.clientConn()
  1017  	tc := testpb.NewTestServiceClient(cc)
  1018  	// Finish an RPC to make sure the connection is good.
  1019  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  1020  	defer cancel()
  1021  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1022  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  1023  	}
  1024  	ch := make(chan struct{})
  1025  	go func() {
  1026  		te.srv.GracefulStop()
  1027  		close(ch)
  1028  	}()
  1029  	// Loop until the server side GoAway signal is propagated to the client.
  1030  	for {
  1031  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
  1032  		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded {
  1033  			cancel()
  1034  			break
  1035  		}
  1036  		cancel()
  1037  	}
  1038  	// A new RPC should fail.
  1039  	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
  1040  	defer cancel()
  1041  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal {
  1042  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal)
  1043  	}
  1044  	<-ch
  1045  	awaitNewConnLogOutput()
  1046  }
  1047  
  1048  func (s) TestServerGoAwayPendingRPC(t *testing.T) {
  1049  	for _, e := range listTestEnv() {
  1050  		if e.name == "handler-tls" {
  1051  			continue
  1052  		}
  1053  		testServerGoAwayPendingRPC(t, e)
  1054  	}
  1055  }
  1056  
  1057  func testServerGoAwayPendingRPC(t *testing.T, e env) {
  1058  	te := newTest(t, e)
  1059  	te.userAgent = testAppUA
  1060  	te.declareLogNoise(
  1061  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  1062  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  1063  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  1064  	)
  1065  	te.startServer(&testServer{security: e.security})
  1066  	defer te.tearDown()
  1067  
  1068  	cc := te.clientConn()
  1069  	tc := testpb.NewTestServiceClient(cc)
  1070  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  1071  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  1072  	if err != nil {
  1073  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1074  	}
  1075  	// Finish an RPC to make sure the connection is good.
  1076  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1077  		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
  1078  	}
  1079  	ch := make(chan struct{})
  1080  	go func() {
  1081  		te.srv.GracefulStop()
  1082  		close(ch)
  1083  	}()
  1084  	// Loop until the server side GoAway signal is propagated to the client.
  1085  	start := time.Now()
  1086  	errored := false
  1087  	for time.Since(start) < time.Second {
  1088  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
  1089  		_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
  1090  		cancel()
  1091  		if err != nil {
  1092  			errored = true
  1093  			break
  1094  		}
  1095  	}
  1096  	if !errored {
  1097  		t.Fatalf("GoAway never received by client")
  1098  	}
  1099  	respParam := []*testpb.ResponseParameters{{Size: 1}}
  1100  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
  1101  	if err != nil {
  1102  		t.Fatal(err)
  1103  	}
  1104  	req := &testpb.StreamingOutputCallRequest{
  1105  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1106  		ResponseParameters: respParam,
  1107  		Payload:            payload,
  1108  	}
  1109  	// The existing RPC should still be good to proceed.
  1110  	if err := stream.Send(req); err != nil {
  1111  		t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
  1112  	}
  1113  	if _, err := stream.Recv(); err != nil {
  1114  		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
  1115  	}
  1116  	// The RPC will run until canceled.
  1117  	cancel()
  1118  	<-ch
  1119  	awaitNewConnLogOutput()
  1120  }
  1121  
  1122  func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) {
  1123  	for _, e := range listTestEnv() {
  1124  		if e.name == "handler-tls" {
  1125  			continue
  1126  		}
  1127  		testServerMultipleGoAwayPendingRPC(t, e)
  1128  	}
  1129  }
  1130  
  1131  func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) {
  1132  	te := newTest(t, e)
  1133  	te.userAgent = testAppUA
  1134  	te.declareLogNoise(
  1135  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  1136  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  1137  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  1138  	)
  1139  	te.startServer(&testServer{security: e.security})
  1140  	defer te.tearDown()
  1141  
  1142  	cc := te.clientConn()
  1143  	tc := testpb.NewTestServiceClient(cc)
  1144  	ctx, cancel := context.WithCancel(context.Background())
  1145  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  1146  	if err != nil {
  1147  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1148  	}
  1149  	// Finish an RPC to make sure the connection is good.
  1150  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1151  		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
  1152  	}
  1153  	ch1 := make(chan struct{})
  1154  	go func() {
  1155  		te.srv.GracefulStop()
  1156  		close(ch1)
  1157  	}()
  1158  	ch2 := make(chan struct{})
  1159  	go func() {
  1160  		te.srv.GracefulStop()
  1161  		close(ch2)
  1162  	}()
  1163  	// Loop until the server side GoAway signal is propagated to the client.
  1164  	for {
  1165  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
  1166  		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1167  			cancel()
  1168  			break
  1169  		}
  1170  		cancel()
  1171  	}
  1172  	select {
  1173  	case <-ch1:
  1174  		t.Fatal("GracefulStop() terminated early")
  1175  	case <-ch2:
  1176  		t.Fatal("GracefulStop() terminated early")
  1177  	default:
  1178  	}
  1179  	respParam := []*testpb.ResponseParameters{
  1180  		{
  1181  			Size: 1,
  1182  		},
  1183  	}
  1184  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
  1185  	if err != nil {
  1186  		t.Fatal(err)
  1187  	}
  1188  	req := &testpb.StreamingOutputCallRequest{
  1189  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1190  		ResponseParameters: respParam,
  1191  		Payload:            payload,
  1192  	}
  1193  	// The existing RPC should still be good to proceed.
  1194  	if err := stream.Send(req); err != nil {
  1195  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  1196  	}
  1197  	if _, err := stream.Recv(); err != nil {
  1198  		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
  1199  	}
  1200  	if err := stream.CloseSend(); err != nil {
  1201  		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
  1202  	}
  1203  	<-ch1
  1204  	<-ch2
  1205  	cancel()
  1206  	awaitNewConnLogOutput()
  1207  }
  1208  
  1209  func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) {
  1210  	for _, e := range listTestEnv() {
  1211  		if e.name == "handler-tls" {
  1212  			continue
  1213  		}
  1214  		testConcurrentClientConnCloseAndServerGoAway(t, e)
  1215  	}
  1216  }
  1217  
  1218  func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) {
  1219  	te := newTest(t, e)
  1220  	te.userAgent = testAppUA
  1221  	te.declareLogNoise(
  1222  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  1223  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  1224  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  1225  	)
  1226  	te.startServer(&testServer{security: e.security})
  1227  	defer te.tearDown()
  1228  
  1229  	cc := te.clientConn()
  1230  	tc := testpb.NewTestServiceClient(cc)
  1231  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1232  	defer cancel()
  1233  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1234  		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
  1235  	}
  1236  	ch := make(chan struct{})
  1237  	// Close ClientConn and Server concurrently.
  1238  	go func() {
  1239  		te.srv.GracefulStop()
  1240  		close(ch)
  1241  	}()
  1242  	go func() {
  1243  		cc.Close()
  1244  	}()
  1245  	<-ch
  1246  }
  1247  
  1248  func (s) TestConcurrentServerStopAndGoAway(t *testing.T) {
  1249  	for _, e := range listTestEnv() {
  1250  		if e.name == "handler-tls" {
  1251  			continue
  1252  		}
  1253  		testConcurrentServerStopAndGoAway(t, e)
  1254  	}
  1255  }
  1256  
  1257  func testConcurrentServerStopAndGoAway(t *testing.T, e env) {
  1258  	te := newTest(t, e)
  1259  	te.userAgent = testAppUA
  1260  	te.declareLogNoise(
  1261  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  1262  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  1263  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  1264  	)
  1265  	te.startServer(&testServer{security: e.security})
  1266  	defer te.tearDown()
  1267  
  1268  	cc := te.clientConn()
  1269  	tc := testpb.NewTestServiceClient(cc)
  1270  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1271  	defer cancel()
  1272  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  1273  	if err != nil {
  1274  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1275  	}
  1276  
  1277  	// Finish an RPC to make sure the connection is good.
  1278  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1279  		t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, <nil>", tc, err)
  1280  	}
  1281  
  1282  	ch := make(chan struct{})
  1283  	go func() {
  1284  		te.srv.GracefulStop()
  1285  		close(ch)
  1286  	}()
  1287  	// Loop until the server side GoAway signal is propagated to the client.
  1288  	for {
  1289  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
  1290  		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
  1291  			cancel()
  1292  			break
  1293  		}
  1294  		cancel()
  1295  	}
  1296  	// Stop the server and close all the connections.
  1297  	te.srv.Stop()
  1298  	respParam := []*testpb.ResponseParameters{
  1299  		{
  1300  			Size: 1,
  1301  		},
  1302  	}
  1303  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100))
  1304  	if err != nil {
  1305  		t.Fatal(err)
  1306  	}
  1307  	req := &testpb.StreamingOutputCallRequest{
  1308  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1309  		ResponseParameters: respParam,
  1310  		Payload:            payload,
  1311  	}
  1312  	sendStart := time.Now()
  1313  	for {
  1314  		if err := stream.Send(req); err == io.EOF {
  1315  			// stream.Send should eventually return io.EOF.
  1316  			break
  1317  		} else if err != nil {
  1318  			// Send should never return a transport-level error.
  1319  			t.Fatalf("stream.Send(%v) = %v; want <nil or io.EOF>", req, err)
  1320  		}
  1321  		if time.Since(sendStart) > 2*time.Second {
  1322  			t.Fatalf("stream.Send(_) did not return io.EOF after 2s")
  1323  		}
  1324  		time.Sleep(time.Millisecond)
  1325  	}
  1326  	if _, err := stream.Recv(); err == nil || err == io.EOF {
  1327  		t.Fatalf("%v.Recv() = _, %v, want _, <non-nil, non-EOF>", stream, err)
  1328  	}
  1329  	<-ch
  1330  	awaitNewConnLogOutput()
  1331  }
  1332  
  1333  func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) {
  1334  	rpcStartedOnServer := make(chan struct{})
  1335  	rpcDoneOnClient := make(chan struct{})
  1336  	ss := &stubserver.StubServer{
  1337  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  1338  			close(rpcStartedOnServer)
  1339  			<-rpcDoneOnClient
  1340  			return status.Error(codes.Internal, "arbitrary status")
  1341  		},
  1342  	}
  1343  	if err := ss.Start(nil); err != nil {
  1344  		t.Fatalf("Error starting endpoint server: %v", err)
  1345  	}
  1346  	defer ss.Stop()
  1347  
  1348  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1349  	defer cancel()
  1350  	// The precise behavior of this test is subject to raciness around the timing of when TCP packets
  1351  	// are sent from client to server, and when we tell the server to stop, so we need to account for both
  1352  	// of these possible error messages:
  1353  	// 1) If the call to ss.S.Stop() causes the server's sockets to close while there's still in-flight
  1354  	//    data from the client on the TCP connection, then the kernel can send an RST back to the client (also
  1355  	//    see https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). Note that while this
  1356  	//    condition is expected to be rare due to the rpcStartedOnServer synchronization, in theory it should
  1357  	//    be possible, e.g. if the client sends a BDP ping at the right time.
  1358  	// 2) If, for example, the call to ss.S.Stop() happens after the RPC headers have been received at the
  1359  	//    server, then the TCP connection can shutdown gracefully when the server's socket closes.
  1360  	const possibleConnResetMsg = "connection reset by peer"
  1361  	const possibleEOFMsg = "error reading from server: EOF"
  1362  	// Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly
  1363  	// stop the server, killing the connection. The RPC error message should include details about the specific
  1364  	// connection error that was encountered.
  1365  	stream, err := ss.Client.FullDuplexCall(ctx)
  1366  	if err != nil {
  1367  		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
  1368  	}
  1369  	// Block until the RPC has been started on the server. This ensures that the ClientConn will find a healthy
  1370  	// connection for the RPC to go out on initially, and that the TCP connection will shut down strictly after
  1371  	// the RPC has been started on it.
  1372  	<-rpcStartedOnServer
  1373  	ss.S.Stop()
  1374  	if _, err := stream.Recv(); err == nil || (!strings.Contains(err.Error(), possibleConnResetMsg) && !strings.Contains(err.Error(), possibleEOFMsg)) {
  1375  		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg)
  1376  	}
  1377  	close(rpcDoneOnClient)
  1378  }
  1379  
  1380  func (s) TestDetailedGoawayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) {
  1381  	rpcDoneOnClient := make(chan struct{})
  1382  	ss := &stubserver.StubServer{
  1383  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  1384  			<-rpcDoneOnClient
  1385  			return status.Error(codes.Internal, "arbitrary status")
  1386  		},
  1387  	}
  1388  	sopts := []grpc.ServerOption{
  1389  		grpc.KeepaliveParams(keepalive.ServerParameters{
  1390  			MaxConnectionAge:      time.Millisecond * 100,
  1391  			MaxConnectionAgeGrace: time.Millisecond,
  1392  		}),
  1393  	}
  1394  	if err := ss.Start(sopts); err != nil {
  1395  		t.Fatalf("Error starting endpoint server: %v", err)
  1396  	}
  1397  	defer ss.Stop()
  1398  
  1399  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1400  	defer cancel()
  1401  	stream, err := ss.Client.FullDuplexCall(ctx)
  1402  	if err != nil {
  1403  		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
  1404  	}
  1405  	const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR"
  1406  	_, err = stream.Recv()
  1407  	close(rpcDoneOnClient)
  1408  	if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) {
  1409  		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring)
  1410  	}
  1411  }
  1412  
  1413  func (s) TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) {
  1414  	// Set the min keepalive time very low so that this test completes in
  1415  	// a reasonable amount of time.
  1416  	prev := internal.KeepaliveMinPingTime
  1417  	internal.KeepaliveMinPingTime = time.Millisecond
  1418  	defer func() { internal.KeepaliveMinPingTime = prev }()
  1419  
  1420  	rpcDoneOnClient := make(chan struct{})
  1421  	ss := &stubserver.StubServer{
  1422  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  1423  			<-rpcDoneOnClient
  1424  			return status.Error(codes.Internal, "arbitrary status")
  1425  		},
  1426  	}
  1427  	sopts := []grpc.ServerOption{
  1428  		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
  1429  			MinTime: time.Second * 1000, /* arbitrary, large value */
  1430  		}),
  1431  	}
  1432  	dopts := []grpc.DialOption{
  1433  		grpc.WithKeepaliveParams(keepalive.ClientParameters{
  1434  			Time:                time.Millisecond,   /* should trigger "too many pings" error quickly */
  1435  			Timeout:             time.Second * 1000, /* arbitrary, large value */
  1436  			PermitWithoutStream: false,
  1437  		}),
  1438  	}
  1439  	if err := ss.Start(sopts, dopts...); err != nil {
  1440  		t.Fatalf("Error starting endpoint server: %v", err)
  1441  	}
  1442  	defer ss.Stop()
  1443  
  1444  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1445  	defer cancel()
  1446  	stream, err := ss.Client.FullDuplexCall(ctx)
  1447  	if err != nil {
  1448  		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
  1449  	}
  1450  	const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"`
  1451  	_, err = stream.Recv()
  1452  	close(rpcDoneOnClient)
  1453  	if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) {
  1454  		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring)
  1455  	}
  1456  }
  1457  
  1458  func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) {
  1459  	for _, e := range listTestEnv() {
  1460  		if e.name == "handler-tls" {
  1461  			continue
  1462  		}
  1463  		testClientConnCloseAfterGoAwayWithActiveStream(t, e)
  1464  	}
  1465  }
  1466  
  1467  func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) {
  1468  	te := newTest(t, e)
  1469  	te.startServer(&testServer{security: e.security})
  1470  	defer te.tearDown()
  1471  	cc := te.clientConn()
  1472  	tc := testpb.NewTestServiceClient(cc)
  1473  
  1474  	ctx, cancel := context.WithCancel(context.Background())
  1475  	defer cancel()
  1476  	if _, err := tc.FullDuplexCall(ctx); err != nil {
  1477  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
  1478  	}
  1479  	done := make(chan struct{})
  1480  	go func() {
  1481  		te.srv.GracefulStop()
  1482  		close(done)
  1483  	}()
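        	// Brief pause so GracefulStop above has (very likely) sent its GOAWAY
        	// while the stream is still active, before we close the ClientConn.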
  1484  	time.Sleep(50 * time.Millisecond)
  1485  	cc.Close()
  1486  	timeout := time.NewTimer(time.Second)
  1487  	select {
  1488  	case <-done:
  1489  	case <-timeout.C:
  1490  		t.Fatalf("Test timed out.")
  1491  	}
  1492  }
  1493  
  1494  func (s) TestFailFast(t *testing.T) {
  1495  	for _, e := range listTestEnv() {
  1496  		testFailFast(t, e)
  1497  	}
  1498  }
  1499  
  1500  func testFailFast(t *testing.T, e env) {
  1501  	te := newTest(t, e)
  1502  	te.userAgent = testAppUA
  1503  	te.declareLogNoise(
  1504  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  1505  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  1506  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  1507  	)
  1508  	te.startServer(&testServer{security: e.security})
  1509  	defer te.tearDown()
  1510  
  1511  	cc := te.clientConn()
  1512  	tc := testpb.NewTestServiceClient(cc)
  1513  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1514  	defer cancel()
  1515  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  1516  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  1517  	}
  1518  	// Stop the server and tear down all the existing connections.
  1519  	te.srv.Stop()
  1520  	// Loop until the server teardown is propagated to the client.
  1521  	for {
  1522  		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  1523  		_, err := tc.EmptyCall(ctx, &testpb.Empty{})
  1524  		cancel()
  1525  		if status.Code(err) == codes.Unavailable {
  1526  			break
  1527  		}
  1528  		t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err)
  1529  		time.Sleep(10 * time.Millisecond)
  1530  	}
  1531  	// The client keeps reconnecting, and ongoing fail-fast RPCs should fail with codes.Unavailable.
  1532  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
  1533  		t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable)
  1534  	}
  1535  	if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable {
  1536  		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable)
  1537  	}
  1538  
  1539  	awaitNewConnLogOutput()
  1540  }
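
        // By default, RPCs are "fail fast": with no ready connection they fail
        // immediately with codes.Unavailable, as asserted above. The per-call
        // opt-out is the grpc.WaitForReady call option; a minimal sketch
        // (illustrative only, not used by the tests in this file):
        func emptyCallWaitForReady(ctx context.Context, tc testpb.TestServiceClient) error {
        	// Queue the RPC until a connection becomes ready (or ctx expires),
        	// instead of failing fast with Unavailable.
        	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))
        	return err
        }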
  1541  
  1542  func testServiceConfigSetup(t *testing.T, e env) *test {
  1543  	te := newTest(t, e)
  1544  	te.userAgent = testAppUA
  1545  	te.declareLogNoise(
  1546  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  1547  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  1548  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  1549  		"Failed to dial : context canceled; please retry.",
  1550  	)
  1551  	return te
  1552  }
  1553  
  1554  func newBool(b bool) *bool {
  1555  	return &b
  1556  }
  1557  
  1558  func newInt(i int) *int {
  1559  	return &i
  1560  }
  1561  
  1562  func newDuration(d time.Duration) *time.Duration {
  1563  	return &d
  1564  }
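
        // On Go 1.18 and newer, the three pointer helpers above could presumably
        // be collapsed into a single generic constructor, e.g.:
        //
        //	func newPtr[T any](v T) *T { return &v }
        //
        // They are kept as separate monomorphic helpers here, which also works on
        // older toolchains.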
  1567  
  1568  func (s) TestGetMethodConfig(t *testing.T) {
  1569  	te := testServiceConfigSetup(t, tcpClearRREnv)
  1570  	defer te.tearDown()
  1571  	r := manual.NewBuilderWithScheme("whatever")
  1572  
  1573  	te.resolverScheme = r.Scheme()
  1574  	cc := te.clientConn(grpc.WithResolvers(r))
  1575  	addrs := []resolver.Address{{Addr: te.srvAddr}}
  1576  	r.UpdateState(resolver.State{
  1577  		Addresses: addrs,
  1578  		ServiceConfig: parseCfg(r, `{
  1579      "methodConfig": [
  1580          {
  1581              "name": [
  1582                  {
  1583                      "service": "grpc.testing.TestService",
  1584                      "method": "EmptyCall"
  1585                  }
  1586              ],
  1587              "waitForReady": true,
  1588              "timeout": ".001s"
  1589          },
  1590          {
  1591              "name": [
  1592                  {
  1593                      "service": "grpc.testing.TestService"
  1594                  }
  1595              ],
  1596              "waitForReady": false
  1597          }
  1598      ]
  1599  }`)})
  1600  
  1601  	tc := testpb.NewTestServiceClient(cc)
  1602  
  1603  	// Make sure service config has been processed by grpc.
  1604  	for {
  1605  		if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
  1606  			break
  1607  		}
  1608  		time.Sleep(time.Millisecond)
  1609  	}
  1610  
  1611  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1612  	defer cancel()
  1613  	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
  1614  	var err error
  1615  	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  1616  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  1617  	}
  1618  
  1619  	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{
  1620      "methodConfig": [
  1621          {
  1622              "name": [
  1623                  {
  1624                      "service": "grpc.testing.TestService",
  1625                      "method": "UnaryCall"
  1626                  }
  1627              ],
  1628              "waitForReady": true,
  1629              "timeout": ".001s"
  1630          },
  1631          {
  1632              "name": [
  1633                  {
  1634                      "service": "grpc.testing.TestService"
  1635                  }
  1636              ],
  1637              "waitForReady": false
  1638          }
  1639      ]
  1640  }`)})
  1641  
  1642  	// Make sure service config has been processed by grpc.
  1643  	for {
  1644  		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady {
  1645  			break
  1646  		}
  1647  		time.Sleep(time.Millisecond)
  1648  	}
  1649  	// The following RPCs are expected to become fail-fast.
  1650  	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
  1651  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
  1652  	}
  1653  }
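
        // The busy-wait loops above spin until the resolver's service config has
        // been processed. A bounded variant, sketched here as a hypothetical
        // helper (not used by these tests), would respect a context deadline
        // instead of spinning forever if the config never arrives:
        func waitForWaitForReady(ctx context.Context, cc *grpc.ClientConn, method string) error {
        	for {
        		// WaitForReady is non-nil once the method config has been applied.
        		if cc.GetMethodConfig(method).WaitForReady != nil {
        			return nil
        		}
        		select {
        		case <-ctx.Done():
        			return ctx.Err()
        		case <-time.After(time.Millisecond):
        		}
        	}
        }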
  1654  
  1655  func (s) TestServiceConfigWaitForReady(t *testing.T) {
  1656  	te := testServiceConfigSetup(t, tcpClearRREnv)
  1657  	defer te.tearDown()
  1658  	r := manual.NewBuilderWithScheme("whatever")
  1659  
  1660  	// Case 1: The client API sets failfast to false while the service config sets wait_for_ready to false. The client API should win, so the RPC waits until its deadline is exceeded.
  1661  	te.resolverScheme = r.Scheme()
  1662  	cc := te.clientConn(grpc.WithResolvers(r))
  1663  	addrs := []resolver.Address{{Addr: te.srvAddr}}
  1664  	r.UpdateState(resolver.State{
  1665  		Addresses: addrs,
  1666  		ServiceConfig: parseCfg(r, `{
  1667      "methodConfig": [
  1668          {
  1669              "name": [
  1670                  {
  1671                      "service": "grpc.testing.TestService",
  1672                      "method": "EmptyCall"
  1673                  },
  1674                  {
  1675                      "service": "grpc.testing.TestService",
  1676                      "method": "FullDuplexCall"
  1677                  }
  1678              ],
  1679              "waitForReady": false,
  1680              "timeout": ".001s"
  1681          }
  1682      ]
  1683  }`)})
  1684  
  1685  	tc := testpb.NewTestServiceClient(cc)
  1686  
  1687  	// Make sure service config has been processed by grpc.
  1688  	for {
  1689  		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
  1690  			break
  1691  		}
  1692  		time.Sleep(time.Millisecond)
  1693  	}
  1694  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1695  	defer cancel()
  1696  	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
  1697  	var err error
  1698  	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  1699  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  1700  	}
  1701  	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  1702  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  1703  	}
  1704  
  1705  	// Generate a service config update.
  1706  	// Case 2: The client API sets failfast to false and the service config sets wait_for_ready to true, so the RPC waits until its deadline is exceeded.
  1707  	r.UpdateState(resolver.State{
  1708  		Addresses: addrs,
  1709  		ServiceConfig: parseCfg(r, `{
  1710      "methodConfig": [
  1711          {
  1712              "name": [
  1713                  {
  1714                      "service": "grpc.testing.TestService",
  1715                      "method": "EmptyCall"
  1716                  },
  1717                  {
  1718                      "service": "grpc.testing.TestService",
  1719                      "method": "FullDuplexCall"
  1720                  }
  1721              ],
  1722              "waitForReady": true,
  1723              "timeout": ".001s"
  1724          }
  1725      ]
  1726  }`)})
  1727  
  1728  	// Wait for the new service config to take effect.
  1729  	for {
  1730  		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady {
  1731  			break
  1732  		}
  1733  		time.Sleep(time.Millisecond)
  1734  	}
  1735  	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
  1736  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  1737  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  1738  	}
  1739  	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
  1740  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  1741  	}
  1742  }
  1743  
  1744  func (s) TestServiceConfigTimeout(t *testing.T) {
  1745  	te := testServiceConfigSetup(t, tcpClearRREnv)
  1746  	defer te.tearDown()
  1747  	r := manual.NewBuilderWithScheme("whatever")
  1748  
  1749  	// Case 1: The client API sets the timeout to 1ns and the service config sets it to 1hr. The effective timeout should be 1ns (the min of the two), so the RPC waits until its deadline is exceeded.
  1750  	te.resolverScheme = r.Scheme()
  1751  	cc := te.clientConn(grpc.WithResolvers(r))
  1752  	addrs := []resolver.Address{{Addr: te.srvAddr}}
  1753  	r.UpdateState(resolver.State{
  1754  		Addresses: addrs,
  1755  		ServiceConfig: parseCfg(r, `{
  1756      "methodConfig": [
  1757          {
  1758              "name": [
  1759                  {
  1760                      "service": "grpc.testing.TestService",
  1761                      "method": "EmptyCall"
  1762                  },
  1763                  {
  1764                      "service": "grpc.testing.TestService",
  1765                      "method": "FullDuplexCall"
  1766                  }
  1767              ],
  1768              "waitForReady": true,
  1769              "timeout": "3600s"
  1770          }
  1771      ]
  1772  }`)})
  1773  
  1774  	tc := testpb.NewTestServiceClient(cc)
  1775  
  1776  	// Make sure service config has been processed by grpc.
  1777  	for {
  1778  		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
  1779  			break
  1780  		}
  1781  		time.Sleep(time.Millisecond)
  1782  	}
  1783  
  1784  	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
  1785  	var err error
  1786  	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
  1787  	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  1788  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  1789  	}
  1790  	cancel()
  1791  
  1792  	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
  1793  	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  1794  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  1795  	}
  1796  	cancel()
  1797  
  1798  	// Generate a service config update.
  1799  	// Case 2: The client API sets the timeout to 1hr and the service config sets it to 1ns. The effective timeout should again be 1ns (the min of the two), so the RPC waits until its deadline is exceeded.
  1800  	r.UpdateState(resolver.State{
  1801  		Addresses: addrs,
  1802  		ServiceConfig: parseCfg(r, `{
  1803      "methodConfig": [
  1804          {
  1805              "name": [
  1806                  {
  1807                      "service": "grpc.testing.TestService",
  1808                      "method": "EmptyCall"
  1809                  },
  1810                  {
  1811                      "service": "grpc.testing.TestService",
  1812                      "method": "FullDuplexCall"
  1813                  }
  1814              ],
  1815              "waitForReady": true,
  1816              "timeout": ".000000001s"
  1817          }
  1818      ]
  1819  }`)})
  1820  
  1821  	// Wait for the new service config to take effect.
  1822  	for {
  1823  		if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond {
  1824  			break
  1825  		}
  1826  		time.Sleep(time.Millisecond)
  1827  	}
  1828  
  1829  	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
  1830  	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  1831  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  1832  	}
  1833  	cancel()
  1834  
  1835  	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
  1836  	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  1837  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  1838  	}
  1839  	cancel()
  1840  }
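
        // Both cases above exercise the same rule: the effective per-RPC deadline
        // is the smaller of the context deadline and the service config timeout.
        // A minimal sketch of that rule (hypothetical helper, not part of gRPC's
        // public API):
        func effectiveTimeout(ctxTimeout, scTimeout time.Duration) time.Duration {
        	if ctxTimeout < scTimeout {
        		return ctxTimeout
        	}
        	return scTimeout
        }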
  1841  
  1842  func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
  1843  	e := tcpClearRREnv
  1844  	r := manual.NewBuilderWithScheme("whatever")
  1845  
  1846  	// Setting up values and objects shared across all test cases.
  1847  	const smallSize = 1
  1848  	const largeSize = 1024
  1849  	const extraLargeSize = 2048
  1850  
  1851  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  1852  	if err != nil {
  1853  		t.Fatal(err)
  1854  	}
  1855  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  1856  	if err != nil {
  1857  		t.Fatal(err)
  1858  	}
  1859  	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
  1860  	if err != nil {
  1861  		t.Fatal(err)
  1862  	}
  1863  
  1864  	// Case 1: The service config sets maxReqSize to 2048 (send) and maxRespSize to 2048 (recv).
  1865  	te1 := testServiceConfigSetup(t, e)
  1866  	defer te1.tearDown()
  1867  
  1868  	te1.resolverScheme = r.Scheme()
  1869  	te1.startServer(&testServer{security: e.security})
  1870  	cc1 := te1.clientConn(grpc.WithResolvers(r))
  1871  
  1872  	addrs := []resolver.Address{{Addr: te1.srvAddr}}
  1873  	sc := parseCfg(r, `{
  1874      "methodConfig": [
  1875          {
  1876              "name": [
  1877                  {
  1878                      "service": "grpc.testing.TestService",
  1879                      "method": "UnaryCall"
  1880                  },
  1881                  {
  1882                      "service": "grpc.testing.TestService",
  1883                      "method": "FullDuplexCall"
  1884                  }
  1885              ],
  1886              "maxRequestMessageBytes": 2048,
  1887              "maxResponseMessageBytes": 2048
  1888          }
  1889      ]
  1890  }`)
  1891  	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
  1892  	tc := testpb.NewTestServiceClient(cc1)
  1893  
  1894  	req := &testpb.SimpleRequest{
  1895  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  1896  		ResponseSize: int32(extraLargeSize),
  1897  		Payload:      smallPayload,
  1898  	}
  1899  
  1900  	for {
  1901  		if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1902  			break
  1903  		}
  1904  		time.Sleep(time.Millisecond)
  1905  	}
  1906  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1907  	defer cancel()
  1908  	// Test for unary RPC recv.
  1909  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
  1910  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1911  	}
  1912  
  1913  	// Test for unary RPC send.
  1914  	req.Payload = extraLargePayload
  1915  	req.ResponseSize = int32(smallSize)
  1916  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1917  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1918  	}
  1919  
  1920  	// Test for streaming RPC recv.
  1921  	respParam := []*testpb.ResponseParameters{
  1922  		{
  1923  			Size: int32(extraLargeSize),
  1924  		},
  1925  	}
  1926  	sreq := &testpb.StreamingOutputCallRequest{
  1927  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1928  		ResponseParameters: respParam,
  1929  		Payload:            smallPayload,
  1930  	}
  1931  	stream, err := tc.FullDuplexCall(te1.ctx)
  1932  	if err != nil {
  1933  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1934  	}
  1935  	if err = stream.Send(sreq); err != nil {
  1936  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1937  	}
  1938  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1939  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1940  	}
  1941  
  1942  	// Test for streaming RPC send.
  1943  	respParam[0].Size = int32(smallSize)
  1944  	sreq.Payload = extraLargePayload
  1945  	stream, err = tc.FullDuplexCall(te1.ctx)
  1946  	if err != nil {
  1947  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1948  	}
  1949  	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1950  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1951  	}
  1952  
  1953  	// Case 2: The client API sets maxReqSize to 1024 (send) and maxRespSize to 1024 (recv); the service config sets both to 2048. The smaller client limits take effect.
  1954  	te2 := testServiceConfigSetup(t, e)
  1955  	te2.resolverScheme = r.Scheme()
  1956  	te2.maxClientReceiveMsgSize = newInt(1024)
  1957  	te2.maxClientSendMsgSize = newInt(1024)
  1958  
  1959  	te2.startServer(&testServer{security: e.security})
  1960  	defer te2.tearDown()
  1961  	cc2 := te2.clientConn(grpc.WithResolvers(r))
  1962  	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc})
  1963  	tc = testpb.NewTestServiceClient(cc2)
  1964  
  1965  	for {
  1966  		if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1967  			break
  1968  		}
  1969  		time.Sleep(time.Millisecond)
  1970  	}
  1971  
  1972  	// Test for unary RPC recv.
  1973  	req.Payload = smallPayload
  1974  	req.ResponseSize = int32(largeSize)
  1975  
  1976  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
  1977  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1978  	}
  1979  
  1980  	// Test for unary RPC send.
  1981  	req.Payload = largePayload
  1982  	req.ResponseSize = int32(smallSize)
  1983  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1984  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1985  	}
  1986  
  1987  	// Test for streaming RPC recv.
  1988  	stream, err = tc.FullDuplexCall(te2.ctx)
  1989  	respParam[0].Size = int32(largeSize)
  1990  	sreq.Payload = smallPayload
  1991  	if err != nil {
  1992  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1993  	}
  1994  	if err = stream.Send(sreq); err != nil {
  1995  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1996  	}
  1997  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1998  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1999  	}
  2000  
  2001  	// Test for streaming RPC send.
  2002  	respParam[0].Size = int32(smallSize)
  2003  	sreq.Payload = largePayload
  2004  	stream, err = tc.FullDuplexCall(te2.ctx)
  2005  	if err != nil {
  2006  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2007  	}
  2008  	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  2009  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  2010  	}
  2011  
  2012  	// Case 3: The client API sets maxReqSize to 4096 (send) and maxRespSize to 4096 (recv); the service config sets both to 2048. The smaller service config limits take effect.
  2013  	te3 := testServiceConfigSetup(t, e)
  2014  	te3.resolverScheme = r.Scheme()
  2015  	te3.maxClientReceiveMsgSize = newInt(4096)
  2016  	te3.maxClientSendMsgSize = newInt(4096)
  2017  
  2018  	te3.startServer(&testServer{security: e.security})
  2019  	defer te3.tearDown()
  2020  
  2021  	cc3 := te3.clientConn(grpc.WithResolvers(r))
  2022  	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc})
  2023  	tc = testpb.NewTestServiceClient(cc3)
  2024  
  2025  	for {
  2026  		if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  2027  			break
  2028  		}
  2029  		time.Sleep(time.Millisecond)
  2030  	}
  2031  
  2032  	// Test for unary RPC recv.
  2033  	req.Payload = smallPayload
  2034  	req.ResponseSize = int32(largeSize)
  2035  
  2036  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err != nil {
  2037  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  2038  	}
  2039  
  2040  	req.ResponseSize = int32(extraLargeSize)
  2041  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2042  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2043  	}
  2044  
  2045  	// Test for unary RPC send.
  2046  	req.Payload = largePayload
  2047  	req.ResponseSize = int32(smallSize)
  2048  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  2049  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  2050  	}
  2051  
  2052  	req.Payload = extraLargePayload
  2053  	if _, err = tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2054  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2055  	}
  2056  
  2057  	// Test for streaming RPC recv.
  2058  	stream, err = tc.FullDuplexCall(te3.ctx)
  2059  	if err != nil {
  2060  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2061  	}
  2062  	respParam[0].Size = int32(largeSize)
  2063  	sreq.Payload = smallPayload
  2064  
  2065  	if err = stream.Send(sreq); err != nil {
  2066  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2067  	}
  2068  	if _, err = stream.Recv(); err != nil {
  2069  		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
  2070  	}
  2071  
  2072  	respParam[0].Size = int32(extraLargeSize)
  2073  
  2074  	if err = stream.Send(sreq); err != nil {
  2075  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2076  	}
  2077  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2078  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2079  	}
  2080  
  2081  	// Test for streaming RPC send.
  2082  	respParam[0].Size = int32(smallSize)
  2083  	sreq.Payload = largePayload
  2084  	stream, err = tc.FullDuplexCall(te3.ctx)
  2085  	if err != nil {
  2086  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2087  	}
  2088  	if err := stream.Send(sreq); err != nil {
  2089  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2090  	}
  2091  	sreq.Payload = extraLargePayload
  2092  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  2093  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  2094  	}
  2095  }
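
        // The per-client limits in Case 2 and Case 3 correspond to public call
        // options, configurable as dial-time defaults; a minimal sketch (values
        // illustrative, mirroring what the test harness sets up):
        func clientMsgSizeDialOption(maxRecv, maxSend int) grpc.DialOption {
        	return grpc.WithDefaultCallOptions(
        		grpc.MaxCallRecvMsgSize(maxRecv), // cap on received (response) messages
        		grpc.MaxCallSendMsgSize(maxSend), // cap on sent (request) messages
        	)
        }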
  2096  
  2097  // Reading from a streaming RPC may fail with "context canceled" if a timeout
  2098  // was set by the service config (https://github.com/grpc/grpc-go/issues/1818).
  2099  // This test makes sure that reading from a streaming RPC doesn't fail in this case.
  2100  func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
  2101  	te := testServiceConfigSetup(t, tcpClearRREnv)
  2102  	te.startServer(&testServer{security: tcpClearRREnv.security})
  2103  	defer te.tearDown()
  2104  	r := manual.NewBuilderWithScheme("whatever")
  2105  
  2106  	te.resolverScheme = r.Scheme()
  2107  	cc := te.clientConn(grpc.WithResolvers(r))
  2108  	tc := testpb.NewTestServiceClient(cc)
  2109  
  2110  	r.UpdateState(resolver.State{
  2111  		Addresses: []resolver.Address{{Addr: te.srvAddr}},
  2112  		ServiceConfig: parseCfg(r, `{
  2113  	    "methodConfig": [
  2114  	        {
  2115  	            "name": [
  2116  	                {
  2117  	                    "service": "grpc.testing.TestService",
  2118  	                    "method": "FullDuplexCall"
  2119  	                }
  2120  	            ],
  2121  	            "waitForReady": true,
  2122  	            "timeout": "10s"
  2123  	        }
  2124  	    ]
  2125  	}`)})
  2126  	// Make sure service config has been processed by grpc.
  2127  	for {
  2128  		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
  2129  			break
  2130  		}
  2131  		time.Sleep(time.Millisecond)
  2132  	}
  2133  
  2134  	ctx, cancel := context.WithCancel(context.Background())
  2135  	defer cancel()
  2136  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  2137  	if err != nil {
  2138  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
  2139  	}
  2140  
  2141  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0)
  2142  	if err != nil {
  2143  		t.Fatalf("newPayload failed: %v", err)
  2144  	}
  2145  	req := &testpb.StreamingOutputCallRequest{
  2146  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2147  		ResponseParameters: []*testpb.ResponseParameters{{Size: 0}},
  2148  		Payload:            payload,
  2149  	}
  2150  	if err := stream.Send(req); err != nil {
  2151  		t.Fatalf("stream.Send(%v) = %v, want <nil>", req, err)
  2152  	}
  2153  	stream.CloseSend()
  2154  	// Sleep 1 second before calling Recv to make sure the final status has
  2155  	// been received by the client before the Recv happens.
  2156  	time.Sleep(time.Second)
  2157  	if _, err := stream.Recv(); err != nil {
  2158  		t.Fatalf("stream.Recv = _, %v, want _, <nil>", err)
  2159  	}
  2160  	// Keep reading to drain the stream.
  2161  	for {
  2162  		if _, err := stream.Recv(); err != nil {
  2163  			break
  2164  		}
  2165  	}
  2166  }
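
        // Draining a stream until its final status, as done above, is a common
        // pattern. A minimal sketch (hypothetical helper) that distinguishes a
        // clean end-of-stream from a genuine error:
        func drainFullDuplex(stream testpb.TestService_FullDuplexCallClient) error {
        	for {
        		if _, err := stream.Recv(); err != nil {
        			if err == io.EOF {
        				return nil // clean end of stream
        			}
        			return err // transport or status error
        		}
        	}
        }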
  2167  
  2168  func (s) TestPreloaderClientSend(t *testing.T) {
  2169  	for _, e := range listTestEnv() {
  2170  		testPreloaderClientSend(t, e)
  2171  	}
  2172  }
  2173  
  2174  func testPreloaderClientSend(t *testing.T, e env) {
  2175  	te := newTest(t, e)
  2176  	te.userAgent = testAppUA
  2177  	te.declareLogNoise(
  2178  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2179  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2180  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2181  		"Failed to dial : context canceled; please retry.",
  2182  	)
  2183  	te.startServer(&testServer{security: e.security})
  2184  
  2185  	defer te.tearDown()
  2186  	tc := testpb.NewTestServiceClient(te.clientConn())
  2187  
  2188  	// Open a full-duplex stream with gzip compression; the stream supplies
  2189  	// the RPC information that PreparedMsg.Encode needs for the send path.
  2190  	stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip"))
  2191  	if err != nil {
  2192  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2193  	}
  2194  	var index int
  2195  	for index < len(reqSizes) {
  2196  		respParam := []*testpb.ResponseParameters{
  2197  			{
  2198  				Size: int32(respSizes[index]),
  2199  			},
  2200  		}
  2201  
  2202  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  2203  		if err != nil {
  2204  			t.Fatal(err)
  2205  		}
  2206  
  2207  		req := &testpb.StreamingOutputCallRequest{
  2208  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2209  			ResponseParameters: respParam,
  2210  			Payload:            payload,
  2211  		}
  2212  		preparedMsg := &grpc.PreparedMsg{}
  2213  		err = preparedMsg.Encode(stream, req)
  2214  		if err != nil {
  2215  			t.Fatalf("PreparedMsg.Encode failed for size %d: %v", reqSizes[index], err)
  2216  		}
  2217  		if err := stream.SendMsg(preparedMsg); err != nil {
  2218  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  2219  		}
  2220  		reply, err := stream.Recv()
  2221  		if err != nil {
  2222  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  2223  		}
  2224  		pt := reply.GetPayload().GetType()
  2225  		if pt != testpb.PayloadType_COMPRESSABLE {
  2226  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  2227  		}
  2228  		size := len(reply.GetPayload().GetBody())
  2229  		if size != int(respSizes[index]) {
  2230  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  2231  		}
  2232  		index++
  2233  	}
  2234  	if err := stream.CloseSend(); err != nil {
  2235  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2236  	}
  2237  	if _, err := stream.Recv(); err != io.EOF {
  2238  		t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
  2239  	}
  2240  }
  2241  
  2242  func (s) TestPreloaderSenderSend(t *testing.T) {
  2243  	ss := &stubserver.StubServer{
  2244  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  2245  			for i := 0; i < 10; i++ {
  2246  				preparedMsg := &grpc.PreparedMsg{}
  2247  				err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{
  2248  					Payload: &testpb.Payload{
  2249  						Body: []byte{'0' + uint8(i)},
  2250  					},
  2251  				})
  2252  				if err != nil {
  2253  					return err
  2254  				}
  2255  				if err := stream.SendMsg(preparedMsg); err != nil {
        					return err
        				}
  2256  			}
  2257  			return nil
  2258  		},
  2259  	}
  2260  	if err := ss.Start(nil); err != nil {
  2261  		t.Fatalf("Error starting endpoint server: %v", err)
  2262  	}
  2263  	defer ss.Stop()
  2264  
  2265  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  2266  	defer cancel()
  2267  
  2268  	stream, err := ss.Client.FullDuplexCall(ctx)
  2269  	if err != nil {
  2270  		t.Fatalf("ss.Client.FullDuplexCall(_) = _, %v; want _, <nil>", err)
  2271  	}
  2272  
  2273  	var ngot int
  2274  	var buf bytes.Buffer
  2275  	for {
  2276  		reply, err := stream.Recv()
  2277  		if err == io.EOF {
  2278  			break
  2279  		}
  2280  		if err != nil {
  2281  			t.Fatal(err)
  2282  		}
  2283  		ngot++
  2284  		if buf.Len() > 0 {
  2285  			buf.WriteByte(',')
  2286  		}
  2287  		buf.Write(reply.GetPayload().GetBody())
  2288  	}
  2289  	if want := 10; ngot != want {
  2290  		t.Errorf("Got %d replies, want %d", ngot, want)
  2291  	}
  2292  	if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  2293  		t.Errorf("Got replies %q; want %q", got, want)
  2294  	}
  2295  }
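
        // grpc.PreparedMsg lets a sender pay the marshal (and compress) cost once
        // and reuse the encoded bytes for several sends on the same stream. A
        // minimal sketch of that reuse (hypothetical helper, assuming repeated
        // SendMsg of one PreparedMsg on a single stream is intended usage):
        func sendPreparedTwice(stream testpb.TestService_FullDuplexCallServer, msg *testpb.StreamingOutputCallResponse) error {
        	pm := &grpc.PreparedMsg{}
        	if err := pm.Encode(stream, msg); err != nil { // encode once
        		return err
        	}
        	for i := 0; i < 2; i++ { // send the same encoded message twice
        		if err := stream.SendMsg(pm); err != nil {
        			return err
        		}
        	}
        	return nil
        }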
  2296  
  2297  func (s) TestMaxMsgSizeClientDefault(t *testing.T) {
  2298  	for _, e := range listTestEnv() {
  2299  		testMaxMsgSizeClientDefault(t, e)
  2300  	}
  2301  }
  2302  
  2303  func testMaxMsgSizeClientDefault(t *testing.T, e env) {
  2304  	te := newTest(t, e)
  2305  	te.userAgent = testAppUA
  2306  	te.declareLogNoise(
  2307  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2308  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2309  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2310  		"Failed to dial : context canceled; please retry.",
  2311  	)
  2312  	te.startServer(&testServer{security: e.security})
  2313  
  2314  	defer te.tearDown()
  2315  	tc := testpb.NewTestServiceClient(te.clientConn())
  2316  
  2317  	const smallSize = 1
  2318  	const largeSize = 4 * 1024 * 1024
  2319  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  2320  	if err != nil {
  2321  		t.Fatal(err)
  2322  	}
  2323  	req := &testpb.SimpleRequest{
  2324  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2325  		ResponseSize: int32(largeSize),
  2326  		Payload:      smallPayload,
  2327  	}
  2328  
  2329  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2330  	defer cancel()
  2331  	// Test for unary RPC recv.
  2332  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2333  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2334  	}
  2335  
  2336  	respParam := []*testpb.ResponseParameters{
  2337  		{
  2338  			Size: int32(largeSize),
  2339  		},
  2340  	}
  2341  	sreq := &testpb.StreamingOutputCallRequest{
  2342  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2343  		ResponseParameters: respParam,
  2344  		Payload:            smallPayload,
  2345  	}
  2346  
  2347  	// Test for streaming RPC recv.
  2348  	stream, err := tc.FullDuplexCall(te.ctx)
  2349  	if err != nil {
  2350  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2351  	}
  2352  	if err := stream.Send(sreq); err != nil {
  2353  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2354  	}
  2355  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2356  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2357  	}
  2358  }
  2359  
  2360  func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
  2361  	for _, e := range listTestEnv() {
  2362  		testMaxMsgSizeClientAPI(t, e)
  2363  	}
  2364  }
  2365  
  2366  func testMaxMsgSizeClientAPI(t *testing.T, e env) {
  2367  	te := newTest(t, e)
  2368  	te.userAgent = testAppUA
  2369  	// Raise the server's send limit to avoid errors on the server side.
  2370  	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
  2371  	te.maxClientReceiveMsgSize = newInt(1024)
  2372  	te.maxClientSendMsgSize = newInt(1024)
  2373  	te.declareLogNoise(
  2374  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2375  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2376  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2377  		"Failed to dial : context canceled; please retry.",
  2378  	)
  2379  	te.startServer(&testServer{security: e.security})
  2380  
  2381  	defer te.tearDown()
  2382  	tc := testpb.NewTestServiceClient(te.clientConn())
  2383  
  2384  	const smallSize = 1
  2385  	const largeSize = 1024
  2386  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  2387  	if err != nil {
  2388  		t.Fatal(err)
  2389  	}
  2390  
  2391  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  2392  	if err != nil {
  2393  		t.Fatal(err)
  2394  	}
  2395  	req := &testpb.SimpleRequest{
  2396  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2397  		ResponseSize: int32(largeSize),
  2398  		Payload:      smallPayload,
  2399  	}
  2400  
  2401  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2402  	defer cancel()
  2403  	// Test for unary RPC recv.
  2404  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2405  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2406  	}
  2407  
  2408  	// Test for unary RPC send.
  2409  	req.Payload = largePayload
  2410  	req.ResponseSize = int32(smallSize)
  2411  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2412  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2413  	}
  2414  
  2415  	respParam := []*testpb.ResponseParameters{
  2416  		{
  2417  			Size: int32(largeSize),
  2418  		},
  2419  	}
  2420  	sreq := &testpb.StreamingOutputCallRequest{
  2421  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2422  		ResponseParameters: respParam,
  2423  		Payload:            smallPayload,
  2424  	}
  2425  
  2426  	// Test for streaming RPC recv.
  2427  	stream, err := tc.FullDuplexCall(te.ctx)
  2428  	if err != nil {
  2429  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2430  	}
  2431  	if err := stream.Send(sreq); err != nil {
  2432  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2433  	}
  2434  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2435  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2436  	}
  2437  
  2438  	// Test for streaming RPC send.
  2439  	respParam[0].Size = int32(smallSize)
  2440  	sreq.Payload = largePayload
  2441  	stream, err = tc.FullDuplexCall(te.ctx)
  2442  	if err != nil {
  2443  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2444  	}
  2445  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  2446  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  2447  	}
  2448  }
  2449  
  2450  func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
  2451  	for _, e := range listTestEnv() {
  2452  		testMaxMsgSizeServerAPI(t, e)
  2453  	}
  2454  }
  2455  
  2456  func testMaxMsgSizeServerAPI(t *testing.T, e env) {
  2457  	te := newTest(t, e)
  2458  	te.userAgent = testAppUA
  2459  	te.maxServerReceiveMsgSize = newInt(1024)
  2460  	te.maxServerSendMsgSize = newInt(1024)
  2461  	te.declareLogNoise(
  2462  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2463  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2464  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2465  		"Failed to dial : context canceled; please retry.",
  2466  	)
  2467  	te.startServer(&testServer{security: e.security})
  2468  
  2469  	defer te.tearDown()
  2470  	tc := testpb.NewTestServiceClient(te.clientConn())
  2471  
  2472  	const smallSize = 1
  2473  	const largeSize = 1024
  2474  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  2475  	if err != nil {
  2476  		t.Fatal(err)
  2477  	}
  2478  
  2479  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  2480  	if err != nil {
  2481  		t.Fatal(err)
  2482  	}
  2483  	req := &testpb.SimpleRequest{
  2484  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2485  		ResponseSize: int32(largeSize),
  2486  		Payload:      smallPayload,
  2487  	}
  2488  
  2489  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2490  	defer cancel()
  2491  	// Test for unary RPC send.
  2492  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2493  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2494  	}
  2495  
  2496  	// Test for unary RPC recv.
  2497  	req.Payload = largePayload
  2498  	req.ResponseSize = int32(smallSize)
  2499  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  2500  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2501  	}
  2502  
  2503  	respParam := []*testpb.ResponseParameters{
  2504  		{
  2505  			Size: int32(largeSize),
  2506  		},
  2507  	}
  2508  	sreq := &testpb.StreamingOutputCallRequest{
  2509  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  2510  		ResponseParameters: respParam,
  2511  		Payload:            smallPayload,
  2512  	}
  2513  
  2514  	// Test for streaming RPC send.
  2515  	stream, err := tc.FullDuplexCall(te.ctx)
  2516  	if err != nil {
  2517  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2518  	}
  2519  	if err := stream.Send(sreq); err != nil {
  2520  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2521  	}
  2522  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2523  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2524  	}
  2525  
  2526  	// Test for streaming RPC recv.
  2527  	respParam[0].Size = int32(smallSize)
  2528  	sreq.Payload = largePayload
  2529  	stream, err = tc.FullDuplexCall(te.ctx)
  2530  	if err != nil {
  2531  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2532  	}
  2533  	if err := stream.Send(sreq); err != nil {
  2534  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  2535  	}
  2536  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  2537  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  2538  	}
  2539  }
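
        // The server-side limits above map to public server options; a minimal
        // sketch (values illustrative, mirroring what the test harness configures):
        func serverMsgSizeOptions(maxRecv, maxSend int) []grpc.ServerOption {
        	return []grpc.ServerOption{
        		grpc.MaxRecvMsgSize(maxRecv), // cap on messages the server will accept
        		grpc.MaxSendMsgSize(maxSend), // cap on messages the server will send
        	}
        }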
  2540  
  2541  func (s) TestTap(t *testing.T) {
  2542  	for _, e := range listTestEnv() {
  2543  		if e.name == "handler-tls" {
  2544  			continue
  2545  		}
  2546  		testTap(t, e)
  2547  	}
  2548  }
  2549  
  2550  type myTap struct {
  2551  	cnt int
  2552  }
  2553  
  2554  func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
  2555  	if info != nil {
  2556  		switch info.FullMethodName {
  2557  		case "/grpc.testing.TestService/EmptyCall":
  2558  			t.cnt++
  2559  		case "/grpc.testing.TestService/UnaryCall":
  2560  			return nil, fmt.Errorf("tap error")
  2561  		case "/grpc.testing.TestService/FullDuplexCall":
  2562  			return nil, status.Errorf(codes.FailedPrecondition, "test custom error")
  2563  		}
  2564  	}
  2565  	return ctx, nil
  2566  }
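
        // A tap handle like the one above runs before server interceptors and can
        // reject a stream early. It is installed with the grpc.InTapHandle server
        // option; a minimal sketch (hypothetical wiring, roughly what the test
        // harness does when te.tapHandle is set):
        func newServerWithTap(tp *myTap) *grpc.Server {
        	return grpc.NewServer(grpc.InTapHandle(tp.handle))
        }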
  2567  
  2568  func testTap(t *testing.T, e env) {
  2569  	te := newTest(t, e)
  2570  	te.userAgent = testAppUA
  2571  	ttap := &myTap{}
  2572  	te.tapHandle = ttap.handle
  2573  	te.declareLogNoise(
  2574  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  2575  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  2576  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  2577  	)
  2578  	te.startServer(&testServer{security: e.security})
  2579  	defer te.tearDown()
  2580  
  2581  	cc := te.clientConn()
  2582  	tc := testpb.NewTestServiceClient(cc)
  2583  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2584  	defer cancel()
  2585  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  2586  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  2587  	}
  2588  	if ttap.cnt != 1 {
  2589  		t.Fatalf("Got the count in ttap %d, want 1", ttap.cnt)
  2590  	}
  2591  
  2592  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
  2593  	if err != nil {
  2594  		t.Fatal(err)
  2595  	}
  2596  
  2597  	req := &testpb.SimpleRequest{
  2598  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2599  		ResponseSize: 45,
  2600  		Payload:      payload,
  2601  	}
  2602  	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.PermissionDenied {
  2603  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied)
  2604  	}
  2605  	str, err := tc.FullDuplexCall(ctx)
  2606  	if err != nil {
  2607  		t.Fatalf("Unexpected error creating stream: %v", err)
  2608  	}
  2609  	if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition {
  2610  		t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition)
  2611  	}
  2612  }
  2613  
  2614  // healthCheck is a helper function to make a unary health check RPC and return
  2615  // the response.
  2616  func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) {
  2617  	ctx, cancel := context.WithTimeout(context.Background(), d)
  2618  	defer cancel()
  2619  	hc := healthgrpc.NewHealthClient(cc)
  2620  	return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service})
  2621  }
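
        // For reference, a health service is wired into a server with the standard
        // registration below; a minimal sketch (the test harness does the
        // equivalent when enableHealthServer is set):
        func registerHealth(srv *grpc.Server) *health.Server {
        	hs := health.NewServer() // the overall ("") service defaults to SERVING
        	healthgrpc.RegisterHealthServer(srv, hs)
        	return hs
        }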
  2622  
  2623  // verifyHealthCheckStatus is a helper function to verify that the current
  2624  // health status of the service matches the one passed in 'wantStatus'.
  2625  func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
  2626  	t.Helper()
  2627  	resp, err := healthCheck(d, cc, service)
  2628  	if err != nil {
  2629  		t.Fatalf("Health/Check(_, _) = _, %v, want _, <nil>", err)
  2630  	}
  2631  	if resp.Status != wantStatus {
  2632  		t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus)
  2633  	}
  2634  }
  2635  
  2636  // verifyHealthCheckErrCode is a helper function to verify that a unary health
  2637  // check RPC returns an error with a code set to 'wantCode'.
  2638  func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) {
  2639  	t.Helper()
  2640  	if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode {
  2641  		t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode)
  2642  	}
  2643  }
  2644  
  2645  // newHealthCheckStream is a helper function to start a health check streaming
  2646  // RPC, and returns the stream.
  2647  func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) {
  2648  	t.Helper()
  2649  	ctx, cancel := context.WithCancel(context.Background())
  2650  	hc := healthgrpc.NewHealthClient(cc)
  2651  	stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
  2652  	if err != nil {
  2653  		t.Fatalf("hc.Watch(_, %v) failed: %v", service, err)
  2654  	}
  2655  	return stream, cancel
  2656  }
  2657  
  2658  // healthWatchChecker is a helper function to verify that the next health
  2659  // status returned on the given stream matches the one passed in 'wantStatus'.
  2660  func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
  2661  	t.Helper()
  2662  	response, err := stream.Recv()
  2663  	if err != nil {
  2664  		t.Fatalf("stream.Recv() failed: %v", err)
  2665  	}
  2666  	if response.Status != wantStatus {
  2667  		t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus)
  2668  	}
  2669  }
  2670  
  2671  // TestHealthCheckSuccess invokes the unary Check() RPC on the health server in
  2672  // a successful case.
  2673  func (s) TestHealthCheckSuccess(t *testing.T) {
  2674  	for _, e := range listTestEnv() {
  2675  		testHealthCheckSuccess(t, e)
  2676  	}
  2677  }
  2678  
  2679  func testHealthCheckSuccess(t *testing.T, e env) {
  2680  	te := newTest(t, e)
  2681  	te.enableHealthServer = true
  2682  	te.startServer(&testServer{security: e.security})
  2683  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2684  	defer te.tearDown()
  2685  
  2686  	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK)
  2687  }
  2688  
  2689  // TestHealthCheckFailure invokes the unary Check() RPC on the health server
  2690  // with an expired context and expects the RPC to fail.
  2691  func (s) TestHealthCheckFailure(t *testing.T) {
  2692  	for _, e := range listTestEnv() {
  2693  		testHealthCheckFailure(t, e)
  2694  	}
  2695  }
  2696  
  2697  func testHealthCheckFailure(t *testing.T, e env) {
  2698  	te := newTest(t, e)
  2699  	te.declareLogNoise(
  2700  		"Failed to dial ",
  2701  		"grpc: the client connection is closing; please retry",
  2702  	)
  2703  	te.enableHealthServer = true
  2704  	te.startServer(&testServer{security: e.security})
  2705  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2706  	defer te.tearDown()
  2707  
  2708  	verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded)
  2709  	awaitNewConnLogOutput()
  2710  }
  2711  
  2712  // TestHealthCheckOff makes a unary Check() RPC on the health server where the
  2713  // health status of the defaultHealthService is not set, and therefore expects
  2714  // an error code 'codes.NotFound'.
  2715  func (s) TestHealthCheckOff(t *testing.T) {
  2716  	for _, e := range listTestEnv() {
  2717  		// TODO(bradfitz): Temporarily skip this env due to #619.
  2718  		if e.name == "handler-tls" {
  2719  			continue
  2720  		}
  2721  		testHealthCheckOff(t, e)
  2722  	}
  2723  }
  2724  
  2725  func testHealthCheckOff(t *testing.T, e env) {
  2726  	te := newTest(t, e)
  2727  	te.enableHealthServer = true
  2728  	te.startServer(&testServer{security: e.security})
  2729  	defer te.tearDown()
  2730  
  2731  	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound)
  2732  }
  2733  
  2734  // TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health
  2735  // server with multiple clients and expects the same status on both streams.
  2736  func (s) TestHealthWatchMultipleClients(t *testing.T) {
  2737  	for _, e := range listTestEnv() {
  2738  		testHealthWatchMultipleClients(t, e)
  2739  	}
  2740  }
  2741  
  2742  func testHealthWatchMultipleClients(t *testing.T, e env) {
  2743  	te := newTest(t, e)
  2744  	te.enableHealthServer = true
  2745  	te.startServer(&testServer{security: e.security})
  2746  	defer te.tearDown()
  2747  
  2748  	cc := te.clientConn()
  2749  	stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService)
  2750  	defer cf1()
  2751  	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2752  
  2753  	stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService)
  2754  	defer cf2()
  2755  	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2756  
  2757  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2758  	healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING)
  2759  	healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING)
  2760  }
  2761  
  2762  // TestHealthWatchSameStatus makes a streaming Watch() RPC on the health server
  2763  // and makes sure that the health status of the server is as expected after
  2764  // multiple calls to SetServingStatus with the same status.
  2765  func (s) TestHealthWatchSameStatus(t *testing.T) {
  2766  	for _, e := range listTestEnv() {
  2767  		testHealthWatchSameStatus(t, e)
  2768  	}
  2769  }
  2770  
  2771  func testHealthWatchSameStatus(t *testing.T, e env) {
  2772  	te := newTest(t, e)
  2773  	te.enableHealthServer = true
  2774  	te.startServer(&testServer{security: e.security})
  2775  	defer te.tearDown()
  2776  
  2777  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2778  	defer cf()
  2779  
  2780  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2781  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2782  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2783  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2784  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2785  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
  2786  }
  2787  
  2788  // TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server
  2789  // on which the health status for the defaultService is set before the gRPC
  2790  // server is started, and expects the correct health status to be returned.
  2791  func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) {
  2792  	for _, e := range listTestEnv() {
  2793  		testHealthWatchSetServiceStatusBeforeStartingServer(t, e)
  2794  	}
  2795  }
  2796  
  2797  func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) {
  2798  	hs := health.NewServer()
  2799  	te := newTest(t, e)
  2800  	te.healthServer = hs
  2801  	hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2802  	te.startServer(&testServer{security: e.security})
  2803  	defer te.tearDown()
  2804  
  2805  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2806  	defer cf()
  2807  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2808  }
  2809  
  2810  // TestHealthWatchDefaultStatusChange verifies the simple case where the
  2811  // service starts off with a SERVICE_UNKNOWN status (because SetServingStatus
  2812  // hasn't been called yet) and then moves to SERVING after SetServingStatus is
  2813  // called.
  2814  func (s) TestHealthWatchDefaultStatusChange(t *testing.T) {
  2815  	for _, e := range listTestEnv() {
  2816  		testHealthWatchDefaultStatusChange(t, e)
  2817  	}
  2818  }
  2819  
  2820  func testHealthWatchDefaultStatusChange(t *testing.T, e env) {
  2821  	te := newTest(t, e)
  2822  	te.enableHealthServer = true
  2823  	te.startServer(&testServer{security: e.security})
  2824  	defer te.tearDown()
  2825  
  2826  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2827  	defer cf()
  2828  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
  2829  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2830  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2831  }
  2832  
  2833  // TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case
  2834  // where the health status is set to SERVING before the client calls Watch().
  2835  func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) {
  2836  	for _, e := range listTestEnv() {
  2837  		testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e)
  2838  	}
  2839  }
  2840  
  2841  func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) {
  2842  	te := newTest(t, e)
  2843  	te.enableHealthServer = true
  2844  	te.startServer(&testServer{security: e.security})
  2845  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2846  	defer te.tearDown()
  2847  
  2848  	stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService)
  2849  	defer cf()
  2850  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2851  }
  2852  
  2853  // TestHealthWatchOverallServerHealthChange verifies setting the overall status
  2854  // of the server by using the empty service name.
  2855  func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) {
  2856  	for _, e := range listTestEnv() {
  2857  		testHealthWatchOverallServerHealthChange(t, e)
  2858  	}
  2859  }
  2860  
  2861  func testHealthWatchOverallServerHealthChange(t *testing.T, e env) {
  2862  	te := newTest(t, e)
  2863  	te.enableHealthServer = true
  2864  	te.startServer(&testServer{security: e.security})
  2865  	defer te.tearDown()
  2866  
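        	// The empty service name refers to the overall health of the server,
        	// which the health server reports as SERVING by default.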
  2867  	stream, cf := newHealthCheckStream(t, te.clientConn(), "")
  2868  	defer cf()
  2869  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING)
  2870  	te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
  2871  	healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING)
  2872  }
  2873  
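        // For reference, a minimal client-side Watch loop outside this test
        // harness might look like the sketch below (cc is assumed to be an
        // established *grpc.ClientConn and ctx a live context):
        //
        //	client := healthgrpc.NewHealthClient(cc)
        //	stream, err := client.Watch(ctx, &healthpb.HealthCheckRequest{Service: ""})
        //	if err != nil {
        //		// handle RPC setup error
        //	}
        //	for {
        //		resp, err := stream.Recv()
        //		if err != nil {
        //			break // stream closed or failed
        //		}
        //		log.Printf("overall health: %v", resp.GetStatus())
        //	}
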
  2874  // TestUnknownHandler verifies that an expected error is returned (by setting
  2875  // the unknownHandler on the server) for a service which is not exposed to the
  2876  // client.
  2877  func (s) TestUnknownHandler(t *testing.T) {
  2878  	// An example unknownHandler that returns a different error code
  2879  	// (Unauthenticated rather than Unimplemented), making sure that we do
  2880  	// not expose which methods are implemented to an unauthenticated client.
  2881  	unknownHandler := func(srv interface{}, stream grpc.ServerStream) error {
  2882  		return status.Error(codes.Unauthenticated, "user unauthenticated")
  2883  	}
  2884  	for _, e := range listTestEnv() {
  2885  		// TODO(bradfitz): Temporarily skip this env due to #619.
  2886  		if e.name == "handler-tls" {
  2887  			continue
  2888  		}
  2889  		testUnknownHandler(t, e, unknownHandler)
  2890  	}
  2891  }
  2892  
  2893  func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) {
  2894  	te := newTest(t, e)
  2895  	te.unknownHandler = unknownHandler
  2896  	te.startServer(&testServer{security: e.security})
  2897  	defer te.tearDown()
  2898  	verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated)
  2899  }
  2900  
  2901  // TestHealthCheckServingStatus makes unary Check() RPCs on the health
  2902  // server and verifies a bunch of health status transitions.
  2903  func (s) TestHealthCheckServingStatus(t *testing.T) {
  2904  	for _, e := range listTestEnv() {
  2905  		testHealthCheckServingStatus(t, e)
  2906  	}
  2907  }
  2908  
  2909  func testHealthCheckServingStatus(t *testing.T, e env) {
  2910  	te := newTest(t, e)
  2911  	te.enableHealthServer = true
  2912  	te.startServer(&testServer{security: e.security})
  2913  	defer te.tearDown()
  2914  
  2915  	cc := te.clientConn()
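        	// Check() returns codes.NotFound for a service that was never registered;
        	// the overall server health ("") defaults to SERVING.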
  2916  	verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING)
  2917  	verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound)
  2918  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2919  	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING)
  2920  	te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2921  	verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING)
  2922  }
  2923  
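        // For reference, the unary Check equivalent of the helpers above (again a
        // hedged sketch; cc and ctx are assumed) would be along the lines of:
        //
        //	resp, err := healthgrpc.NewHealthClient(cc).Check(ctx,
        //		&healthpb.HealthCheckRequest{Service: defaultHealthService})
        //	if err != nil {
        //		// status.Code(err) is codes.NotFound for unregistered services
        //	}
        //	_ = resp.GetStatus() // e.g. SERVING or NOT_SERVING
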
  2924  func (s) TestEmptyUnaryWithUserAgent(t *testing.T) {
  2925  	for _, e := range listTestEnv() {
  2926  		testEmptyUnaryWithUserAgent(t, e)
  2927  	}
  2928  }
  2929  
  2930  func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
  2931  	te := newTest(t, e)
  2932  	te.userAgent = testAppUA
  2933  	te.startServer(&testServer{security: e.security})
  2934  	defer te.tearDown()
  2935  
  2936  	cc := te.clientConn()
  2937  	tc := testpb.NewTestServiceClient(cc)
  2938  	var header metadata.MD
  2939  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2940  	defer cancel()
  2941  	reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header))
  2942  	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
  2943  		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
  2944  	}
  2945  	if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
  2946  		t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
  2947  	}
  2948  
  2949  	te.srv.Stop()
  2950  }
  2951  
  2952  func (s) TestFailedEmptyUnary(t *testing.T) {
  2953  	for _, e := range listTestEnv() {
  2954  		if e.name == "handler-tls" {
  2955  			// This test covers status details, but
  2956  			// Grpc-Status-Details-Bin is not supported in handler_server.
  2957  			continue
  2958  		}
  2959  		testFailedEmptyUnary(t, e)
  2960  	}
  2961  }
  2962  
  2963  func testFailedEmptyUnary(t *testing.T, e env) {
  2964  	te := newTest(t, e)
  2965  	te.userAgent = failAppUA
  2966  	te.startServer(&testServer{security: e.security})
  2967  	defer te.tearDown()
  2968  	tc := testpb.NewTestServiceClient(te.clientConn())
  2969  
  2970  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  2971  	wantErr := detailedError
  2972  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
  2973  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
  2974  	}
  2975  }
  2976  
  2977  func (s) TestLargeUnary(t *testing.T) {
  2978  	for _, e := range listTestEnv() {
  2979  		testLargeUnary(t, e)
  2980  	}
  2981  }
  2982  
  2983  func testLargeUnary(t *testing.T, e env) {
  2984  	te := newTest(t, e)
  2985  	te.startServer(&testServer{security: e.security})
  2986  	defer te.tearDown()
  2987  	tc := testpb.NewTestServiceClient(te.clientConn())
  2988  
  2989  	const argSize = 271828
  2990  	const respSize = 314159
  2991  
  2992  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2993  	if err != nil {
  2994  		t.Fatal(err)
  2995  	}
  2996  
  2997  	req := &testpb.SimpleRequest{
  2998  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2999  		ResponseSize: respSize,
  3000  		Payload:      payload,
  3001  	}
  3002  
  3003  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3004  	defer cancel()
  3005  	reply, err := tc.UnaryCall(ctx, req)
  3006  	if err != nil {
  3007  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  3008  	}
  3009  	pt := reply.GetPayload().GetType()
  3010  	ps := len(reply.GetPayload().GetBody())
  3011  	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
  3012  		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
  3013  	}
  3014  }
  3015  
  3016  // Tests the backward-compatibility API for setting the message size limit.
  3017  func (s) TestExceedMsgLimit(t *testing.T) {
  3018  	for _, e := range listTestEnv() {
  3019  		testExceedMsgLimit(t, e)
  3020  	}
  3021  }
  3022  
  3023  func testExceedMsgLimit(t *testing.T, e env) {
  3024  	te := newTest(t, e)
  3025  	maxMsgSize := 1024
  3026  	te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize)
  3027  	te.startServer(&testServer{security: e.security})
  3028  	defer te.tearDown()
  3029  	tc := testpb.NewTestServiceClient(te.clientConn())
  3030  
  3031  	largeSize := int32(maxMsgSize + 1)
  3032  	const smallSize = 1
  3033  
  3034  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  3035  	if err != nil {
  3036  		t.Fatal(err)
  3037  	}
  3038  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  3039  	if err != nil {
  3040  		t.Fatal(err)
  3041  	}
  3042  
  3043  	// Make sure the server cannot receive a unary RPC of largeSize.
  3044  	req := &testpb.SimpleRequest{
  3045  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3046  		ResponseSize: smallSize,
  3047  		Payload:      largePayload,
  3048  	}
  3049  
  3050  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3051  	defer cancel()
  3052  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  3053  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  3054  	}
  3055  	// Make sure the client cannot receive a unary RPC of largeSize.
  3056  	req.ResponseSize = largeSize
  3057  	req.Payload = smallPayload
  3058  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  3059  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  3060  	}
  3061  
  3062  	// Make sure the server cannot receive a streaming RPC of largeSize.
  3063  	stream, err := tc.FullDuplexCall(te.ctx)
  3064  	if err != nil {
  3065  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3066  	}
  3067  	respParam := []*testpb.ResponseParameters{
  3068  		{
  3069  			Size: 1,
  3070  		},
  3071  	}
  3072  
  3073  	sreq := &testpb.StreamingOutputCallRequest{
  3074  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3075  		ResponseParameters: respParam,
  3076  		Payload:            largePayload,
  3077  	}
  3078  	if err := stream.Send(sreq); err != nil {
  3079  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  3080  	}
  3081  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  3082  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  3083  	}
  3084  
  3085  	// Make sure the client cannot receive a streaming RPC of largeSize.
  3086  	stream, err = tc.FullDuplexCall(te.ctx)
  3087  	if err != nil {
  3088  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3089  	}
  3090  	respParam[0].Size = largeSize
  3091  	sreq.Payload = smallPayload
  3092  	if err := stream.Send(sreq); err != nil {
  3093  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  3094  	}
  3095  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  3096  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  3097  	}
  3098  }
  3099  
  3100  func (s) TestPeerClientSide(t *testing.T) {
  3101  	for _, e := range listTestEnv() {
  3102  		testPeerClientSide(t, e)
  3103  	}
  3104  }
  3105  
  3106  func testPeerClientSide(t *testing.T, e env) {
  3107  	te := newTest(t, e)
  3108  	te.userAgent = testAppUA
  3109  	te.startServer(&testServer{security: e.security})
  3110  	defer te.tearDown()
  3111  	tc := testpb.NewTestServiceClient(te.clientConn())
  3112  	peer := new(peer.Peer)
  3113  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3114  	defer cancel()
  3115  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil {
  3116  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  3117  	}
  3118  	pa := peer.Addr.String()
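        	// For unix sockets the peer address must match exactly; for TCP
        	// connections only the port is compared.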
  3119  	if e.network == "unix" {
  3120  		if pa != te.srvAddr {
  3121  			t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
  3122  		}
  3123  		return
  3124  	}
  3125  	_, pp, err := net.SplitHostPort(pa)
  3126  	if err != nil {
  3127  		t.Fatalf("Failed to parse address from peer: %v", err)
  3128  	}
  3129  	_, sp, err := net.SplitHostPort(te.srvAddr)
  3130  	if err != nil {
  3131  		t.Fatalf("Failed to parse address of test server: %v", err)
  3132  	}
  3133  	if pp != sp {
  3134  		t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
  3135  	}
  3136  }
  3137  
  3138  // TestPeerNegative tests that a failed RPC does not cause a segmentation
  3139  // fault when grpc.Peer() is used to capture the peer.
  3140  // issue#1141 https://github.com/grpc/grpc-go/issues/1141
  3141  func (s) TestPeerNegative(t *testing.T) {
  3142  	for _, e := range listTestEnv() {
  3143  		testPeerNegative(t, e)
  3144  	}
  3145  }
  3146  
  3147  func testPeerNegative(t *testing.T, e env) {
  3148  	te := newTest(t, e)
  3149  	te.startServer(&testServer{security: e.security})
  3150  	defer te.tearDown()
  3151  
  3152  	cc := te.clientConn()
  3153  	tc := testpb.NewTestServiceClient(cc)
  3154  	peer := new(peer.Peer)
  3155  	ctx, cancel := context.WithCancel(context.Background())
  3156  	cancel()
  3157  	tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer))
  3158  }
  3159  
  3160  func (s) TestPeerFailedRPC(t *testing.T) {
  3161  	for _, e := range listTestEnv() {
  3162  		testPeerFailedRPC(t, e)
  3163  	}
  3164  }
  3165  
  3166  func testPeerFailedRPC(t *testing.T, e env) {
  3167  	te := newTest(t, e)
  3168  	te.maxServerReceiveMsgSize = newInt(1 * 1024)
  3169  	te.startServer(&testServer{security: e.security})
  3170  
  3171  	defer te.tearDown()
  3172  	tc := testpb.NewTestServiceClient(te.clientConn())
  3173  
  3174  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3175  	defer cancel()
  3176  	// First make a successful request to the server.
  3177  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  3178  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  3179  	}
  3180  
  3181  	// Make a second request that will be rejected by the server.
  3182  	const largeSize = 5 * 1024
  3183  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  3184  	if err != nil {
  3185  		t.Fatal(err)
  3186  	}
  3187  	req := &testpb.SimpleRequest{
  3188  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3189  		Payload:      largePayload,
  3190  	}
  3191  
  3192  	peer := new(peer.Peer)
  3193  	if _, err := tc.UnaryCall(ctx, req, grpc.Peer(peer)); err == nil || status.Code(err) != codes.ResourceExhausted {
  3194  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  3195  	} else {
  3196  		pa := peer.Addr.String()
  3197  		if e.network == "unix" {
  3198  			if pa != te.srvAddr {
  3199  				t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
  3200  			}
  3201  			return
  3202  		}
  3203  		_, pp, err := net.SplitHostPort(pa)
  3204  		if err != nil {
  3205  			t.Fatalf("Failed to parse address from peer: %v", err)
  3206  		}
  3207  		_, sp, err := net.SplitHostPort(te.srvAddr)
  3208  		if err != nil {
  3209  			t.Fatalf("Failed to parse address of test server: %v", err)
  3210  		}
  3211  		if pp != sp {
  3212  			t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
  3213  		}
  3214  	}
  3215  }
  3216  
  3217  func (s) TestMetadataUnaryRPC(t *testing.T) {
  3218  	for _, e := range listTestEnv() {
  3219  		testMetadataUnaryRPC(t, e)
  3220  	}
  3221  }
  3222  
  3223  func testMetadataUnaryRPC(t *testing.T, e env) {
  3224  	te := newTest(t, e)
  3225  	te.startServer(&testServer{security: e.security})
  3226  	defer te.tearDown()
  3227  	tc := testpb.NewTestServiceClient(te.clientConn())
  3228  
  3229  	const argSize = 2718
  3230  	const respSize = 314
  3231  
  3232  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3233  	if err != nil {
  3234  		t.Fatal(err)
  3235  	}
  3236  
  3237  	req := &testpb.SimpleRequest{
  3238  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3239  		ResponseSize: respSize,
  3240  		Payload:      payload,
  3241  	}
  3242  	var header, trailer metadata.MD
  3243  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3244  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
  3245  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3246  	}
  3247  	// Ignore optional response headers that servers may set:
  3248  	if header != nil {
  3249  		delete(header, "trailer") // RFC 2616 says the server SHOULD (but is not required to) declare trailers
  3250  		delete(header, "date")    // the Date header is also optional
  3251  		delete(header, "user-agent")
  3252  		delete(header, "content-type")
  3253  	}
  3254  	if !reflect.DeepEqual(header, testMetadata) {
  3255  		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
  3256  	}
  3257  	if !reflect.DeepEqual(trailer, testTrailerMetadata) {
  3258  		t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
  3259  	}
  3260  }
  3261  
  3262  func (s) TestMetadataOrderUnaryRPC(t *testing.T) {
  3263  	for _, e := range listTestEnv() {
  3264  		testMetadataOrderUnaryRPC(t, e)
  3265  	}
  3266  }
  3267  
  3268  func testMetadataOrderUnaryRPC(t *testing.T, e env) {
  3269  	te := newTest(t, e)
  3270  	te.startServer(&testServer{security: e.security})
  3271  	defer te.tearDown()
  3272  	tc := testpb.NewTestServiceClient(te.clientConn())
  3273  
  3274  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3275  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
  3276  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")
  3277  
  3278  	// Use Join to build the expected metadata instead of FromOutgoingContext.
  3279  	newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3"))
  3280  
  3281  	var header metadata.MD
  3282  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil {
  3283  		t.Fatal(err)
  3284  	}
  3285  
  3286  	// Ignore optional response headers that servers may set:
  3287  	if header != nil {
  3288  		delete(header, "trailer") // RFC 2616 says the server SHOULD (but is not required to) declare trailers
  3289  		delete(header, "date")    // the Date header is also optional
  3290  		delete(header, "user-agent")
  3291  		delete(header, "content-type")
  3292  	}
  3293  
  3294  	if !reflect.DeepEqual(header, newMetadata) {
  3295  		t.Fatalf("Received header metadata %v, want %v", header, newMetadata)
  3296  	}
  3297  }
  3298  
  3299  func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) {
  3300  	for _, e := range listTestEnv() {
  3301  		testMultipleSetTrailerUnaryRPC(t, e)
  3302  	}
  3303  }
  3304  
  3305  func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
  3306  	te := newTest(t, e)
  3307  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  3308  	defer te.tearDown()
  3309  	tc := testpb.NewTestServiceClient(te.clientConn())
  3310  
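        	// Keep sending until the server's early failure is reflected as io.EOF
        	// on Send; the RPC's actual status is then returned by CloseAndRecv.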
  3311  	const (
  3312  		argSize  = 1
  3313  		respSize = 1
  3314  	)
  3315  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3316  	if err != nil {
  3317  		t.Fatal(err)
  3318  	}
  3319  
  3320  	req := &testpb.SimpleRequest{
  3321  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3322  		ResponseSize: respSize,
  3323  		Payload:      payload,
  3324  	}
  3325  	var trailer metadata.MD
  3326  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3327  	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
  3328  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3329  	}
  3330  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  3331  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  3332  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  3333  	}
  3334  }
  3335  
  3336  func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) {
  3337  	for _, e := range listTestEnv() {
  3338  		testMultipleSetTrailerStreamingRPC(t, e)
  3339  	}
  3340  }
  3341  
  3342  func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
  3343  	te := newTest(t, e)
  3344  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  3345  	defer te.tearDown()
  3346  	tc := testpb.NewTestServiceClient(te.clientConn())
  3347  
  3348  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3349  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  3350  	if err != nil {
  3351  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3352  	}
  3353  	if err := stream.CloseSend(); err != nil {
  3354  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3355  	}
  3356  	if _, err := stream.Recv(); err != io.EOF {
  3357  		t.Fatalf("%v failed to complete the FullDuplexCall: %v", stream, err)
  3358  	}
  3359  
  3360  	trailer := stream.Trailer()
  3361  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  3362  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  3363  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  3364  	}
  3365  }
  3366  
  3367  func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) {
  3368  	for _, e := range listTestEnv() {
  3369  		if e.name == "handler-tls" {
  3370  			continue
  3371  		}
  3372  		testSetAndSendHeaderUnaryRPC(t, e)
  3373  	}
  3374  }
  3375  
  3376  // Tests that header metadata is sent on SendHeader().
  3377  func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
  3378  	te := newTest(t, e)
  3379  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  3380  	defer te.tearDown()
  3381  	tc := testpb.NewTestServiceClient(te.clientConn())
  3382  
  3383  	const (
  3384  		argSize  = 1
  3385  		respSize = 1
  3386  	)
  3387  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3388  	if err != nil {
  3389  		t.Fatal(err)
  3390  	}
  3391  
  3392  	req := &testpb.SimpleRequest{
  3393  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3394  		ResponseSize: respSize,
  3395  		Payload:      payload,
  3396  	}
  3397  	var header metadata.MD
  3398  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3399  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  3400  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3401  	}
  3402  	delete(header, "user-agent")
  3403  	delete(header, "content-type")
  3404  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3405  	if !reflect.DeepEqual(header, expectedHeader) {
  3406  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3407  	}
  3408  }
  3409  
  3410  func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) {
  3411  	for _, e := range listTestEnv() {
  3412  		if e.name == "handler-tls" {
  3413  			continue
  3414  		}
  3415  		testMultipleSetHeaderUnaryRPC(t, e)
  3416  	}
  3417  }
  3418  
  3419  // Tests that header metadata is sent when sending the response.
  3420  func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
  3421  	te := newTest(t, e)
  3422  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3423  	defer te.tearDown()
  3424  	tc := testpb.NewTestServiceClient(te.clientConn())
  3425  
  3426  	const (
  3427  		argSize  = 1
  3428  		respSize = 1
  3429  	)
  3430  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3431  	if err != nil {
  3432  		t.Fatal(err)
  3433  	}
  3434  
  3435  	req := &testpb.SimpleRequest{
  3436  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3437  		ResponseSize: respSize,
  3438  		Payload:      payload,
  3439  	}
  3440  
  3441  	var header metadata.MD
  3442  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3443  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  3444  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  3445  	}
  3446  	delete(header, "user-agent")
  3447  	delete(header, "content-type")
  3448  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3449  	if !reflect.DeepEqual(header, expectedHeader) {
  3450  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3451  	}
  3452  }
  3453  
  3454  func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) {
  3455  	for _, e := range listTestEnv() {
  3456  		if e.name == "handler-tls" {
  3457  			continue
  3458  		}
  3459  		testMultipleSetHeaderUnaryRPCError(t, e)
  3460  	}
  3461  }
  3462  
  3463  // Tests that header metadata is sent when sending the status.
  3464  func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
  3465  	te := newTest(t, e)
  3466  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3467  	defer te.tearDown()
  3468  	tc := testpb.NewTestServiceClient(te.clientConn())
  3469  
  3470  	const (
  3471  		argSize  = 1
  3472  		respSize = -1 // Invalid respSize to make RPC fail.
  3473  	)
  3474  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3475  	if err != nil {
  3476  		t.Fatal(err)
  3477  	}
  3478  
  3479  	req := &testpb.SimpleRequest{
  3480  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3481  		ResponseSize: respSize,
  3482  		Payload:      payload,
  3483  	}
  3484  	var header metadata.MD
  3485  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3486  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
  3487  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
  3488  	}
  3489  	delete(header, "user-agent")
  3490  	delete(header, "content-type")
  3491  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3492  	if !reflect.DeepEqual(header, expectedHeader) {
  3493  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3494  	}
  3495  }
  3496  
  3497  func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) {
  3498  	for _, e := range listTestEnv() {
  3499  		if e.name == "handler-tls" {
  3500  			continue
  3501  		}
  3502  		testSetAndSendHeaderStreamingRPC(t, e)
  3503  	}
  3504  }
  3505  
  3506  // Tests that header metadata is sent on SendHeader().
  3507  func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
  3508  	te := newTest(t, e)
  3509  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  3510  	defer te.tearDown()
  3511  	tc := testpb.NewTestServiceClient(te.clientConn())
  3512  
  3513  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3514  	stream, err := tc.FullDuplexCall(ctx)
  3515  	if err != nil {
  3516  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3517  	}
  3518  	if err := stream.CloseSend(); err != nil {
  3519  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3520  	}
  3521  	if _, err := stream.Recv(); err != io.EOF {
  3522  		t.Fatalf("%v failed to complete the FullDuplexCall: %v", stream, err)
  3523  	}
  3524  
  3525  	header, err := stream.Header()
  3526  	if err != nil {
  3527  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  3528  	}
  3529  	delete(header, "user-agent")
  3530  	delete(header, "content-type")
  3531  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3532  	if !reflect.DeepEqual(header, expectedHeader) {
  3533  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3534  	}
  3535  }
  3536  
  3537  func (s) TestMultipleSetHeaderStreamingRPC(t *testing.T) {
  3538  	for _, e := range listTestEnv() {
  3539  		if e.name == "handler-tls" {
  3540  			continue
  3541  		}
  3542  		testMultipleSetHeaderStreamingRPC(t, e)
  3543  	}
  3544  }
  3545  
  3546  // Tests that header metadata is sent when sending the response.
  3547  func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
  3548  	te := newTest(t, e)
  3549  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3550  	defer te.tearDown()
  3551  	tc := testpb.NewTestServiceClient(te.clientConn())
  3552  
  3553  	const (
  3554  		argSize  = 1
  3555  		respSize = 1
  3556  	)
  3557  	ctx := metadata.NewOutgoingContext(context.Background(), testMetadata)
  3558  	stream, err := tc.FullDuplexCall(ctx)
  3559  	if err != nil {
  3560  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3561  	}
  3562  
  3563  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3564  	if err != nil {
  3565  		t.Fatal(err)
  3566  	}
  3567  
  3568  	req := &testpb.StreamingOutputCallRequest{
  3569  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3570  		ResponseParameters: []*testpb.ResponseParameters{
  3571  			{Size: respSize},
  3572  		},
  3573  		Payload: payload,
  3574  	}
  3575  	if err := stream.Send(req); err != nil {
  3576  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3577  	}
  3578  	if _, err := stream.Recv(); err != nil {
  3579  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  3580  	}
  3581  	if err := stream.CloseSend(); err != nil {
  3582  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3583  	}
  3584  	if _, err := stream.Recv(); err != io.EOF {
  3585  		t.Fatalf("%v failed to complete the FullDuplexCall: %v", stream, err)
  3586  	}
  3587  
  3588  	header, err := stream.Header()
  3589  	if err != nil {
  3590  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  3591  	}
  3592  	delete(header, "user-agent")
  3593  	delete(header, "content-type")
  3594  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3595  	if !reflect.DeepEqual(header, expectedHeader) {
  3596  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3597  	}
  3599  }
  3600  
  3601  func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) {
  3602  	for _, e := range listTestEnv() {
  3603  		if e.name == "handler-tls" {
  3604  			continue
  3605  		}
  3606  		testMultipleSetHeaderStreamingRPCError(t, e)
  3607  	}
  3608  }
  3609  
  3610  // Tests that header metadata is sent when sending the status.
  3611  func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
  3612  	te := newTest(t, e)
  3613  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  3614  	defer te.tearDown()
  3615  	tc := testpb.NewTestServiceClient(te.clientConn())
  3616  
  3617  	const (
  3618  		argSize  = 1
  3619  		respSize = -1
  3620  	)
  3621  	ctx, cancel := context.WithCancel(context.Background())
  3622  	defer cancel()
  3623  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  3624  	stream, err := tc.FullDuplexCall(ctx)
  3625  	if err != nil {
  3626  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3627  	}
  3628  
  3629  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3630  	if err != nil {
  3631  		t.Fatal(err)
  3632  	}
  3633  
  3634  	req := &testpb.StreamingOutputCallRequest{
  3635  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3636  		ResponseParameters: []*testpb.ResponseParameters{
  3637  			{Size: respSize},
  3638  		},
  3639  		Payload: payload,
  3640  	}
  3641  	if err := stream.Send(req); err != nil {
  3642  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3643  	}
  3644  	if _, err := stream.Recv(); err == nil {
  3645  		t.Fatalf("%v.Recv() = %v, want <non-nil>", stream, err)
  3646  	}
  3647  
  3648  	header, err := stream.Header()
  3649  	if err != nil {
  3650  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  3651  	}
  3652  	delete(header, "user-agent")
  3653  	delete(header, "content-type")
  3654  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  3655  	if !reflect.DeepEqual(header, expectedHeader) {
  3656  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  3657  	}
  3658  	if err := stream.CloseSend(); err != nil {
  3659  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3660  	}
  3661  }
  3662  
  3663  // TestMalformedHTTP2Metadata verifies the returned error when the client
  3664  // sends illegal metadata.
  3665  func (s) TestMalformedHTTP2Metadata(t *testing.T) {
  3666  	for _, e := range listTestEnv() {
  3667  		if e.name == "handler-tls" {
  3668  			// Skipped: the server stops accepting new RPCs
  3669  			// once the client sends an illegal HTTP/2 header.
  3670  			continue
  3671  		}
  3672  		testMalformedHTTP2Metadata(t, e)
  3673  	}
  3674  }
  3675  
  3676  func testMalformedHTTP2Metadata(t *testing.T, e env) {
  3677  	te := newTest(t, e)
  3678  	te.startServer(&testServer{security: e.security})
  3679  	defer te.tearDown()
  3680  	tc := testpb.NewTestServiceClient(te.clientConn())
  3681  
  3682  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
  3683  	if err != nil {
  3684  		t.Fatal(err)
  3685  	}
  3686  
  3687  	req := &testpb.SimpleRequest{
  3688  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3689  		ResponseSize: 314,
  3690  		Payload:      payload,
  3691  	}
  3692  	ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata)
  3693  	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
  3694  		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
  3695  	}
  3696  }
  3697  
  3698  // Tests that the client transparently retries correctly when receiving a
  3699  // RST_STREAM with code REFUSED_STREAM.
  3700  func (s) TestTransparentRetry(t *testing.T) {
  3701  	testCases := []struct {
  3702  		failFast bool
  3703  		errCode  codes.Code
  3704  	}{{
  3705  		// success attempt: 1, (stream ID 1)
  3706  	}, {
  3707  		// success attempt: 2, (stream IDs 3, 5)
  3708  	}, {
  3709  		// no success attempt (stream IDs 7, 9)
  3710  		errCode: codes.Unavailable,
  3711  	}, {
  3712  		// success attempt: 1 (stream ID 11),
  3713  		failFast: true,
  3714  	}, {
  3715  		// success attempt: 2 (stream IDs 13, 15),
  3716  		failFast: true,
  3717  	}, {
  3718  		// no success attempt (stream IDs 17, 19)
  3719  		failFast: true,
  3720  		errCode:  codes.Unavailable,
  3721  	}}
  3722  
  3723  	lis, err := net.Listen("tcp", "localhost:0")
  3724  	if err != nil {
  3725  		t.Fatalf("Failed to listen. Err: %v", err)
  3726  	}
  3727  	defer lis.Close()
  3728  	server := &httpServer{
  3729  		responses: []httpServerResponse{{
  3730  			trailers: [][]string{{
  3731  				":status", "200",
  3732  				"content-type", "application/grpc",
  3733  				"grpc-status", "0",
  3734  			}},
  3735  		}},
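        		// Client-initiated HTTP/2 stream IDs are odd and increase from 1; each
        		// RPC attempt, including a transparent retry, takes the next odd ID.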
  3736  		refuseStream: func(i uint32) bool {
  3737  			switch i {
  3738  			case 1, 5, 11, 15: // these stream IDs succeed
  3739  				return false
  3740  			}
  3741  			return true // these are refused
  3742  		},
  3743  	}
  3744  	server.start(t, lis)
  3745  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
  3746  	if err != nil {
  3747  		t.Fatalf("failed to dial due to err: %v", err)
  3748  	}
  3749  	defer cc.Close()
  3750  
  3751  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  3752  	defer cancel()
  3753  
  3754  	client := testpb.NewTestServiceClient(cc)
  3755  
  3756  	for i, tc := range testCases {
  3757  		stream, err := client.FullDuplexCall(ctx)
  3758  		if err != nil {
  3759  			t.Fatalf("error creating stream due to err: %v", err)
  3760  		}
  3761  		code := func(err error) codes.Code {
  3762  			if err == io.EOF {
  3763  				return codes.OK
  3764  			}
  3765  			return status.Code(err)
  3766  		}
  3767  		if _, err := stream.Recv(); code(err) != tc.errCode {
  3768  			t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode)
  3769  		}
  3771  	}
  3772  }
  3773  
  3774  func (s) TestCancel(t *testing.T) {
  3775  	for _, e := range listTestEnv() {
  3776  		testCancel(t, e)
  3777  	}
  3778  }
  3779  
  3780  func testCancel(t *testing.T, e env) {
  3781  	te := newTest(t, e)
  3782  	te.declareLogNoise("grpc: the client connection is closing; please retry")
  3783  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second})
  3784  	defer te.tearDown()
  3785  
  3786  	cc := te.clientConn()
  3787  	tc := testpb.NewTestServiceClient(cc)
  3788  
  3789  	const argSize = 2718
  3790  	const respSize = 314
  3791  
  3792  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3793  	if err != nil {
  3794  		t.Fatal(err)
  3795  	}
  3796  
  3797  	req := &testpb.SimpleRequest{
  3798  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3799  		ResponseSize: respSize,
  3800  		Payload:      payload,
  3801  	}
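        	// The server handler sleeps for unaryCallSleepTime (one second), so
        	// canceling after 1ms catches the RPC while it is still in flight.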
  3802  	ctx, cancel := context.WithCancel(context.Background())
  3803  	time.AfterFunc(1*time.Millisecond, cancel)
  3804  	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
  3805  		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
  3806  	}
  3807  	awaitNewConnLogOutput()
  3808  }
  3809  
  3810  func (s) TestCancelNoIO(t *testing.T) {
  3811  	for _, e := range listTestEnv() {
  3812  		testCancelNoIO(t, e)
  3813  	}
  3814  }
  3815  
  3816  func testCancelNoIO(t *testing.T, e env) {
  3817  	te := newTest(t, e)
  3818  	te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken")
  3819  	te.maxStream = 1 // Only allows 1 live stream per server transport.
  3820  	te.startServer(&testServer{security: e.security})
  3821  	defer te.tearDown()
  3822  
  3823  	cc := te.clientConn()
  3824  	tc := testpb.NewTestServiceClient(cc)
  3825  
  3826  	// Start one blocked RPC for which we'll never send streaming
  3827  	// input. This will consume the 1 maximum concurrent streams,
  3828  	// causing future RPCs to hang.
  3829  	ctx, cancelFirst := context.WithCancel(context.Background())
  3830  	_, err := tc.StreamingInputCall(ctx)
  3831  	if err != nil {
  3832  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
  3833  	}
  3834  
  3835  	// Loop until the ClientConn receives the initial settings
  3836  	// frame from the server, notifying it about the maximum
  3837  	// concurrent streams. We know when it's received it because
  3838  	// an RPC will fail with codes.DeadlineExceeded instead of
  3839  	// succeeding.
  3840  	// TODO(bradfitz): add internal test hook for this (Issue 534)
  3841  	for {
  3842  		ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond)
  3843  		_, err := tc.StreamingInputCall(ctx)
  3844  		cancelSecond()
  3845  		if err == nil {
  3846  			continue
  3847  		}
  3848  		if status.Code(err) == codes.DeadlineExceeded {
  3849  			break
  3850  		}
  3851  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
  3852  	}
  3853  	// If there are any RPCs in flight before the client receives
  3854  	// the max streams setting, let them be expired.
  3855  	// TODO(bradfitz): add internal test hook for this (Issue 534)
  3856  	time.Sleep(50 * time.Millisecond)
  3857  
  3858  	go func() {
  3859  		time.Sleep(50 * time.Millisecond)
  3860  		cancelFirst()
  3861  	}()
  3862  
  3863  	// This should be blocked until the 1st is canceled, then succeed.
  3864  	ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond)
  3865  	if _, err := tc.StreamingInputCall(ctx); err != nil {
  3866  		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
  3867  	}
  3868  	cancelThird()
  3869  }
  3870  
  3871  // The following tests cover the gRPC streaming RPC implementations.
  3872  // TODO(zhaoq): Have better coverage on error cases.
  3873  var (
  3874  	reqSizes  = []int{27182, 8, 1828, 45904}
  3875  	respSizes = []int{31415, 9, 2653, 58979}
  3876  )
  3877  
  3878  func (s) TestNoService(t *testing.T) {
  3879  	for _, e := range listTestEnv() {
  3880  		testNoService(t, e)
  3881  	}
  3882  }
  3883  
  3884  func testNoService(t *testing.T, e env) {
  3885  	te := newTest(t, e)
  3886  	te.startServer(nil)
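        	// No service is registered with the server, so any RPC should fail
        	// with codes.Unimplemented.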
  3887  	defer te.tearDown()
  3888  
  3889  	cc := te.clientConn()
  3890  	tc := testpb.NewTestServiceClient(cc)
  3891  
  3892  	stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true))
  3893  	if err != nil {
  3894  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3895  	}
  3896  	if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented {
  3897  		t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented)
  3898  	}
  3899  }
  3900  
  3901  func (s) TestPingPong(t *testing.T) {
  3902  	for _, e := range listTestEnv() {
  3903  		testPingPong(t, e)
  3904  	}
  3905  }
  3906  
  3907  func testPingPong(t *testing.T, e env) {
  3908  	te := newTest(t, e)
  3909  	te.startServer(&testServer{security: e.security})
  3910  	defer te.tearDown()
  3911  	tc := testpb.NewTestServiceClient(te.clientConn())
  3912  
  3913  	stream, err := tc.FullDuplexCall(te.ctx)
  3914  	if err != nil {
  3915  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3916  	}
  3917  	var index int
  3918  	for index < len(reqSizes) {
  3919  		respParam := []*testpb.ResponseParameters{
  3920  			{
  3921  				Size: int32(respSizes[index]),
  3922  			},
  3923  		}
  3924  
  3925  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  3926  		if err != nil {
  3927  			t.Fatal(err)
  3928  		}
  3929  
  3930  		req := &testpb.StreamingOutputCallRequest{
  3931  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3932  			ResponseParameters: respParam,
  3933  			Payload:            payload,
  3934  		}
  3935  		if err := stream.Send(req); err != nil {
  3936  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3937  		}
  3938  		reply, err := stream.Recv()
  3939  		if err != nil {
  3940  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  3941  		}
  3942  		pt := reply.GetPayload().GetType()
  3943  		if pt != testpb.PayloadType_COMPRESSABLE {
  3944  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  3945  		}
  3946  		size := len(reply.GetPayload().GetBody())
  3947  		if size != int(respSizes[index]) {
  3948  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  3949  		}
  3950  		index++
  3951  	}
  3952  	if err := stream.CloseSend(); err != nil {
  3953  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3954  	}
  3955  	if _, err := stream.Recv(); err != io.EOF {
  3956  		t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
  3957  	}
  3958  }
  3959  
  3960  func (s) TestMetadataStreamingRPC(t *testing.T) {
  3961  	for _, e := range listTestEnv() {
  3962  		testMetadataStreamingRPC(t, e)
  3963  	}
  3964  }
  3965  
  3966  func testMetadataStreamingRPC(t *testing.T, e env) {
  3967  	te := newTest(t, e)
  3968  	te.startServer(&testServer{security: e.security})
  3969  	defer te.tearDown()
  3970  	tc := testpb.NewTestServiceClient(te.clientConn())
  3971  
  3972  	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
  3973  	stream, err := tc.FullDuplexCall(ctx)
  3974  	if err != nil {
  3975  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3976  	}
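        	// Drive the request side (header reads and sends) from a separate
        	// goroutine while the main goroutine drains the responses and trailer.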
  3977  	go func() {
  3978  		headerMD, err := stream.Header()
  3979  		if e.security == "tls" {
  3980  			delete(headerMD, "transport_security_type")
  3981  		}
  3982  		delete(headerMD, "trailer") // ignore if present
  3983  		delete(headerMD, "user-agent")
  3984  		delete(headerMD, "content-type")
  3985  		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
  3986  			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
  3987  		}
  3988  		// Test the cached value.
  3989  		headerMD, err = stream.Header()
  3990  		delete(headerMD, "trailer") // ignore if present
  3991  		delete(headerMD, "user-agent")
  3992  		delete(headerMD, "content-type")
  3993  		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
  3994  			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
  3995  		}
  3996  		err = func() error {
  3997  			for index := 0; index < len(reqSizes); index++ {
  3998  				respParam := []*testpb.ResponseParameters{
  3999  					{
  4000  						Size: int32(respSizes[index]),
  4001  					},
  4002  				}
  4003  
  4004  				payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  4005  				if err != nil {
  4006  					return err
  4007  				}
  4008  
  4009  				req := &testpb.StreamingOutputCallRequest{
  4010  					ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4011  					ResponseParameters: respParam,
  4012  					Payload:            payload,
  4013  				}
  4014  				if err := stream.Send(req); err != nil {
  4015  					return fmt.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  4016  				}
  4017  			}
  4018  			return nil
  4019  		}()
  4020  		// Tell the server we're done sending args.
  4021  		stream.CloseSend()
  4022  		if err != nil {
  4023  			t.Error(err)
  4024  		}
  4025  	}()
  4026  	for {
  4027  		if _, err := stream.Recv(); err != nil {
  4028  			break
  4029  		}
  4030  	}
  4031  	trailerMD := stream.Trailer()
  4032  	if !reflect.DeepEqual(testTrailerMetadata, trailerMD) {
  4033  		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata)
  4034  	}
  4035  }
  4036  
  4037  func (s) TestServerStreaming(t *testing.T) {
  4038  	for _, e := range listTestEnv() {
  4039  		testServerStreaming(t, e)
  4040  	}
  4041  }
  4042  
  4043  func testServerStreaming(t *testing.T, e env) {
  4044  	te := newTest(t, e)
  4045  	te.startServer(&testServer{security: e.security})
  4046  	defer te.tearDown()
  4047  	tc := testpb.NewTestServiceClient(te.clientConn())
  4048  
  4049  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  4050  	for i, s := range respSizes {
  4051  		respParam[i] = &testpb.ResponseParameters{
  4052  			Size: int32(s),
  4053  		}
  4054  	}
  4055  	req := &testpb.StreamingOutputCallRequest{
  4056  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4057  		ResponseParameters: respParam,
  4058  	}
  4059  
  4060  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4061  	defer cancel()
  4062  	stream, err := tc.StreamingOutputCall(ctx, req)
  4063  	if err != nil {
  4064  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  4065  	}
  4066  	var rpcStatus error
  4067  	var respCnt int
  4068  	var index int
  4069  	for {
  4070  		reply, err := stream.Recv()
  4071  		if err != nil {
  4072  			rpcStatus = err
  4073  			break
  4074  		}
  4075  		pt := reply.GetPayload().GetType()
  4076  		if pt != testpb.PayloadType_COMPRESSABLE {
  4077  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  4078  		}
  4079  		size := len(reply.GetPayload().GetBody())
  4080  		if size != int(respSizes[index]) {
  4081  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  4082  		}
  4083  		index++
  4084  		respCnt++
  4085  	}
  4086  	if rpcStatus != io.EOF {
  4087  		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
  4088  	}
  4089  	if respCnt != len(respSizes) {
  4090  		t.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
  4091  	}
  4092  }
  4093  
  4094  func (s) TestFailedServerStreaming(t *testing.T) {
  4095  	for _, e := range listTestEnv() {
  4096  		testFailedServerStreaming(t, e)
  4097  	}
  4098  }
  4099  
  4100  func testFailedServerStreaming(t *testing.T, e env) {
  4101  	te := newTest(t, e)
  4102  	te.userAgent = failAppUA
  4103  	te.startServer(&testServer{security: e.security})
  4104  	defer te.tearDown()
  4105  	tc := testpb.NewTestServiceClient(te.clientConn())
  4106  
  4107  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  4108  	for i, s := range respSizes {
  4109  		respParam[i] = &testpb.ResponseParameters{
  4110  			Size: int32(s),
  4111  		}
  4112  	}
  4113  	req := &testpb.StreamingOutputCallRequest{
  4114  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4115  		ResponseParameters: respParam,
  4116  	}
  4117  	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
  4118  	stream, err := tc.StreamingOutputCall(ctx, req)
  4119  	if err != nil {
  4120  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  4121  	}
  4122  	wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA)
  4123  	if _, err := stream.Recv(); !equalError(err, wantErr) {
  4124  		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr)
  4125  	}
  4126  }
  4127  
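        // equalError reports whether x and y are the same error value or have
        // identical error messages.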
  4128  func equalError(x, y error) bool {
  4129  	return x == y || (x != nil && y != nil && x.Error() == y.Error())
  4130  }
  4131  
  4132  // concurrentSendServer is a TestServiceServer whose
  4133  // StreamingOutputCall makes ten serial Send calls, sending payloads
  4134  // "0".."9", inclusive.  TestServerStreamingConcurrent verifies they
  4135  // were received in the correct order, and that there were no races.
  4136  //
  4137  // All other TestServiceServer methods crash if called.
  4138  type concurrentSendServer struct {
  4139  	testpb.TestServiceServer
  4140  }
  4141  
  4142  func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
  4143  	for i := 0; i < 10; i++ {
  4144  		stream.Send(&testpb.StreamingOutputCallResponse{
  4145  			Payload: &testpb.Payload{
  4146  				Body: []byte{'0' + uint8(i)},
  4147  			},
  4148  		})
  4149  	}
  4150  	return nil
  4151  }
  4152  
  4153  // Tests doing a bunch of concurrent streaming output calls.
  4154  func (s) TestServerStreamingConcurrent(t *testing.T) {
  4155  	for _, e := range listTestEnv() {
  4156  		testServerStreamingConcurrent(t, e)
  4157  	}
  4158  }
  4159  
  4160  func testServerStreamingConcurrent(t *testing.T, e env) {
  4161  	te := newTest(t, e)
  4162  	te.startServer(concurrentSendServer{})
  4163  	defer te.tearDown()
  4164  
  4165  	cc := te.clientConn()
  4166  	tc := testpb.NewTestServiceClient(cc)
  4167  
  4168  	doStreamingCall := func() {
  4169  		req := &testpb.StreamingOutputCallRequest{}
  4170  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4171  		defer cancel()
  4172  		stream, err := tc.StreamingOutputCall(ctx, req)
  4173  		if err != nil {
  4174  			t.Errorf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  4175  			return
  4176  		}
  4177  		var ngot int
  4178  		var buf bytes.Buffer
  4179  		for {
  4180  			reply, err := stream.Recv()
  4181  			if err == io.EOF {
  4182  				break
  4183  			}
  4184  			if err != nil {
  4185  				t.Error(err) // Fatal must not be called outside the test goroutine.
        				return
  4186  			}
  4187  			ngot++
  4188  			if buf.Len() > 0 {
  4189  				buf.WriteByte(',')
  4190  			}
  4191  			buf.Write(reply.GetPayload().GetBody())
  4192  		}
  4193  		if want := 10; ngot != want {
  4194  			t.Errorf("Got %d replies, want %d", ngot, want)
  4195  		}
  4196  		if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  4197  			t.Errorf("Got replies %q; want %q", got, want)
  4198  		}
  4199  	}
  4200  
  4201  	var wg sync.WaitGroup
  4202  	for i := 0; i < 20; i++ {
  4203  		wg.Add(1)
  4204  		go func() {
  4205  			defer wg.Done()
  4206  			doStreamingCall()
  4207  		}()
  4208  	}
  4209  	wg.Wait()
  4211  }
  4212  
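        // generatePayloadSizes returns three request-size profiles: a small mixed
        // set, 1024 8 KiB payloads, and eight 2 MiB payloads.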
  4213  func generatePayloadSizes() [][]int {
  4214  	reqSizes := [][]int{
  4215  		{27182, 8, 1828, 45904},
  4216  	}
  4217  
  4218  	num8KPayloads := 1024
  4219  	eightKPayloads := []int{}
  4220  	for i := 0; i < num8KPayloads; i++ {
  4221  		eightKPayloads = append(eightKPayloads, (1 << 13))
  4222  	}
  4223  	reqSizes = append(reqSizes, eightKPayloads)
  4224  
  4225  	num2MPayloads := 8
  4226  	twoMPayloads := []int{}
  4227  	for i := 0; i < num2MPayloads; i++ {
  4228  		twoMPayloads = append(twoMPayloads, (1 << 21))
  4229  	}
  4230  	reqSizes = append(reqSizes, twoMPayloads)
  4231  
  4232  	return reqSizes
  4233  }
  4234  
  4235  func (s) TestClientStreaming(t *testing.T) {
  4236  	for _, s := range generatePayloadSizes() {
  4237  		for _, e := range listTestEnv() {
  4238  			testClientStreaming(t, e, s)
  4239  		}
  4240  	}
  4241  }
  4242  
  4243  func testClientStreaming(t *testing.T, e env, sizes []int) {
  4244  	te := newTest(t, e)
  4245  	te.startServer(&testServer{security: e.security})
  4246  	defer te.tearDown()
  4247  	tc := testpb.NewTestServiceClient(te.clientConn())
  4248  
  4249  	ctx, cancel := context.WithTimeout(te.ctx, time.Second*30)
  4250  	defer cancel()
  4251  	stream, err := tc.StreamingInputCall(ctx)
  4252  	if err != nil {
  4253  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
  4254  	}
  4255  
  4256  	var sum int
  4257  	for _, s := range sizes {
  4258  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
  4259  		if err != nil {
  4260  			t.Fatal(err)
  4261  		}
  4262  
  4263  		req := &testpb.StreamingInputCallRequest{
  4264  			Payload: payload,
  4265  		}
  4266  		if err := stream.Send(req); err != nil {
  4267  			t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
  4268  		}
  4269  		sum += s
  4270  	}
  4271  	reply, err := stream.CloseAndRecv()
  4272  	if err != nil {
  4273  		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
  4274  	}
  4275  	if reply.GetAggregatedPayloadSize() != int32(sum) {
  4276  		t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
  4277  	}
  4278  }
  4279  
  4280  func (s) TestClientStreamingError(t *testing.T) {
  4281  	for _, e := range listTestEnv() {
  4282  		if e.name == "handler-tls" {
  4283  			continue
  4284  		}
  4285  		testClientStreamingError(t, e)
  4286  	}
  4287  }
  4288  
  4289  func testClientStreamingError(t *testing.T, e env) {
  4290  	te := newTest(t, e)
  4291  	te.startServer(&testServer{security: e.security, earlyFail: true})
  4292  	defer te.tearDown()
  4293  	tc := testpb.NewTestServiceClient(te.clientConn())
  4294  
  4295  	stream, err := tc.StreamingInputCall(te.ctx)
  4296  	if err != nil {
  4297  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
  4298  	}
  4299  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
  4300  	if err != nil {
  4301  		t.Fatal(err)
  4302  	}
  4303  
  4304  	req := &testpb.StreamingInputCallRequest{
  4305  		Payload: payload,
  4306  	}
  4307  	// The 1st request should go through.
  4308  	if err := stream.Send(req); err != nil {
  4309  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  4310  	}
  4311  	for {
  4312  		if err := stream.Send(req); err != io.EOF {
  4313  			continue
  4314  		}
  4315  		if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound {
  4316  			t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound)
  4317  		}
  4318  		break
  4319  	}
  4320  }
  4321  
  4322  func (s) TestExceedMaxStreamsLimit(t *testing.T) {
  4323  	for _, e := range listTestEnv() {
  4324  		testExceedMaxStreamsLimit(t, e)
  4325  	}
  4326  }
  4327  
  4328  func testExceedMaxStreamsLimit(t *testing.T, e env) {
  4329  	te := newTest(t, e)
  4330  	te.declareLogNoise(
  4331  		"http2Client.notifyError got notified that the client transport was broken",
  4332  		"Conn.resetTransport failed to create client transport",
  4333  		"grpc: the connection is closing",
  4334  	)
  4335  	te.maxStream = 1 // Only allows 1 live stream per server transport.
  4336  	te.startServer(&testServer{security: e.security})
  4337  	defer te.tearDown()
  4338  
  4339  	cc := te.clientConn()
  4340  	tc := testpb.NewTestServiceClient(cc)
  4341  
  4342  	_, err := tc.StreamingInputCall(te.ctx)
  4343  	if err != nil {
  4344  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
  4345  	}
  4346  	// Loop until receiving the new max stream setting from the server.
  4347  	for {
  4348  		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
  4349  		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
  4350  		_, err := tc.StreamingInputCall(ctx)
  4351  		cancel()
  4352  			time.Sleep(50 * time.Millisecond)
  4353  			continue
  4354  		}
  4355  		if status.Code(err) == codes.DeadlineExceeded {
  4356  			break
  4357  		}
  4358  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
  4359  	}
  4360  }
  4361  
  4362  func (s) TestStreamsQuotaRecovery(t *testing.T) {
  4363  	for _, e := range listTestEnv() {
  4364  		testStreamsQuotaRecovery(t, e)
  4365  	}
  4366  }
  4367  
  4368  func testStreamsQuotaRecovery(t *testing.T, e env) {
  4369  	te := newTest(t, e)
  4370  	te.declareLogNoise(
  4371  		"http2Client.notifyError got notified that the client transport was broken",
  4372  		"Conn.resetTransport failed to create client transport",
  4373  		"grpc: the connection is closing",
  4374  	)
  4375  	te.maxStream = 1 // Allows 1 live stream.
  4376  	te.startServer(&testServer{security: e.security})
  4377  	defer te.tearDown()
  4378  
  4379  	cc := te.clientConn()
  4380  	tc := testpb.NewTestServiceClient(cc)
  4381  	ctx, cancel := context.WithCancel(context.Background())
  4382  	defer cancel()
  4383  	if _, err := tc.StreamingInputCall(ctx); err != nil {
  4384  		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
  4385  	}
  4386  	// Loop until the new max stream setting is effective.
  4387  	for {
  4388  		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
  4389  		_, err := tc.StreamingInputCall(ctx)
  4390  		cancel()
  4391  		if err == nil {
  4392  			time.Sleep(5 * time.Millisecond)
  4393  			continue
  4394  		}
  4395  		if status.Code(err) == codes.DeadlineExceeded {
  4396  			break
  4397  		}
  4398  		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  4399  	}
  4400  
  4401  	var wg sync.WaitGroup
  4402  	for i := 0; i < 10; i++ {
  4403  		wg.Add(1)
  4404  		go func() {
  4405  			defer wg.Done()
  4406  			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314)
  4407  			if err != nil {
  4408  				t.Error(err)
  4409  				return
  4410  			}
  4411  			req := &testpb.SimpleRequest{
  4412  				ResponseType: testpb.PayloadType_COMPRESSABLE,
  4413  				ResponseSize: 1592,
  4414  				Payload:      payload,
  4415  			}
  4416  			// No RPC should go through due to the max streams limit.
  4417  			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
  4418  			defer cancel()
  4419  			if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  4420  				t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  4421  			}
  4422  		}()
  4423  	}
  4424  	wg.Wait()
  4425  
  4426  	cancel()
  4427  	// A new stream should be allowed after canceling the first one.
  4428  	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
  4429  	defer cancel()
  4430  	if _, err := tc.StreamingInputCall(ctx); err != nil {
  4431  		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
  4432  	}
  4433  }
  4434  
  4435  func (s) TestCompressServerHasNoSupport(t *testing.T) {
  4436  	for _, e := range listTestEnv() {
  4437  		testCompressServerHasNoSupport(t, e)
  4438  	}
  4439  }
  4440  
  4441  func testCompressServerHasNoSupport(t *testing.T, e env) {
  4442  	te := newTest(t, e)
  4443  	te.serverCompression = false
  4444  	te.clientCompression = false
  4445  	te.clientNopCompression = true
  4446  	te.startServer(&testServer{security: e.security})
  4447  	defer te.tearDown()
  4448  	tc := testpb.NewTestServiceClient(te.clientConn())
  4449  
  4450  	const argSize = 271828
  4451  	const respSize = 314159
  4452  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  4453  	if err != nil {
  4454  		t.Fatal(err)
  4455  	}
  4456  	req := &testpb.SimpleRequest{
  4457  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  4458  		ResponseSize: respSize,
  4459  		Payload:      payload,
  4460  	}
  4461  
  4462  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4463  	defer cancel()
  4464  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented {
  4465  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented)
  4466  	}
  4467  	// Streaming RPC
  4468  	stream, err := tc.FullDuplexCall(ctx)
  4469  	if err != nil {
  4470  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4471  	}
  4472  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented {
  4473  		t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented)
  4474  	}
  4475  }
  4476  
  4477  func (s) TestCompressOK(t *testing.T) {
  4478  	for _, e := range listTestEnv() {
  4479  		testCompressOK(t, e)
  4480  	}
  4481  }
  4482  
  4483  func testCompressOK(t *testing.T, e env) {
  4484  	te := newTest(t, e)
  4485  	te.serverCompression = true
  4486  	te.clientCompression = true
  4487  	te.startServer(&testServer{security: e.security})
  4488  	defer te.tearDown()
  4489  	tc := testpb.NewTestServiceClient(te.clientConn())
  4490  
  4491  	// Unary call
  4492  	const argSize = 271828
  4493  	const respSize = 314159
  4494  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  4495  	if err != nil {
  4496  		t.Fatal(err)
  4497  	}
  4498  	req := &testpb.SimpleRequest{
  4499  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  4500  		ResponseSize: respSize,
  4501  		Payload:      payload,
  4502  	}
  4503  	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
  4504  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  4505  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  4506  	}
  4507  	// Streaming RPC
  4508  	ctx, cancel := context.WithCancel(context.Background())
  4509  	defer cancel()
  4510  	stream, err := tc.FullDuplexCall(ctx)
  4511  	if err != nil {
  4512  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4513  	}
  4514  	respParam := []*testpb.ResponseParameters{
  4515  		{
  4516  			Size: 31415,
  4517  		},
  4518  	}
  4519  	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
  4520  	if err != nil {
  4521  		t.Fatal(err)
  4522  	}
  4523  	sreq := &testpb.StreamingOutputCallRequest{
  4524  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4525  		ResponseParameters: respParam,
  4526  		Payload:            payload,
  4527  	}
  4528  	if err := stream.Send(sreq); err != nil {
  4529  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  4530  	}
  4531  	stream.CloseSend()
  4532  	if _, err := stream.Recv(); err != nil {
  4533  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  4534  	}
  4535  	if _, err := stream.Recv(); err != io.EOF {
  4536  		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
  4537  	}
  4538  }
  4539  
  4540  func (s) TestIdentityEncoding(t *testing.T) {
  4541  	for _, e := range listTestEnv() {
  4542  		testIdentityEncoding(t, e)
  4543  	}
  4544  }
  4545  
  4546  func testIdentityEncoding(t *testing.T, e env) {
  4547  	te := newTest(t, e)
  4548  	te.startServer(&testServer{security: e.security})
  4549  	defer te.tearDown()
  4550  	tc := testpb.NewTestServiceClient(te.clientConn())
  4551  
  4552  	// Unary call
  4553  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5)
  4554  	if err != nil {
  4555  		t.Fatal(err)
  4556  	}
  4557  	req := &testpb.SimpleRequest{
  4558  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  4559  		ResponseSize: 10,
  4560  		Payload:      payload,
  4561  	}
  4562  	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
  4563  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  4564  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  4565  	}
  4566  	// Streaming RPC
  4567  	ctx, cancel := context.WithCancel(context.Background())
  4568  	defer cancel()
  4569  	stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity"))
  4570  	if err != nil {
  4571  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4572  	}
  4573  	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
  4574  	if err != nil {
  4575  		t.Fatal(err)
  4576  	}
  4577  	sreq := &testpb.StreamingOutputCallRequest{
  4578  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4579  		ResponseParameters: []*testpb.ResponseParameters{{Size: 10}},
  4580  		Payload:            payload,
  4581  	}
  4582  	if err := stream.Send(sreq); err != nil {
  4583  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  4584  	}
  4585  	stream.CloseSend()
  4586  	if _, err := stream.Recv(); err != nil {
  4587  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  4588  	}
  4589  	if _, err := stream.Recv(); err != io.EOF {
  4590  		t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err)
  4591  	}
  4592  }
  4593  
  4594  func (s) TestUnaryClientInterceptor(t *testing.T) {
  4595  	for _, e := range listTestEnv() {
  4596  		testUnaryClientInterceptor(t, e)
  4597  	}
  4598  }
  4599  
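        // failOkayRPC is a unary client interceptor that turns a successful RPC
        // into a NotFound error, letting the test verify the interceptor ran.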
  4600  func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
  4601  	err := invoker(ctx, method, req, reply, cc, opts...)
  4602  	if err == nil {
  4603  		return status.Error(codes.NotFound, "")
  4604  	}
  4605  	return err
  4606  }
  4607  
  4608  func testUnaryClientInterceptor(t *testing.T, e env) {
  4609  	te := newTest(t, e)
  4610  	te.userAgent = testAppUA
  4611  	te.unaryClientInt = failOkayRPC
  4612  	te.startServer(&testServer{security: e.security})
  4613  	defer te.tearDown()
  4614  
  4615  	tc := testpb.NewTestServiceClient(te.clientConn())
  4616  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4617  	defer cancel()
  4618  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound {
  4619  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  4620  	}
  4621  }
  4622  
  4623  func (s) TestStreamClientInterceptor(t *testing.T) {
  4624  	for _, e := range listTestEnv() {
  4625  		testStreamClientInterceptor(t, e)
  4626  	}
  4627  }
  4628  
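        // failOkayStream is a stream client interceptor that replaces a
        // successfully created stream with a NotFound error.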
  4629  func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
  4630  	s, err := streamer(ctx, desc, cc, method, opts...)
  4631  	if err == nil {
  4632  		return nil, status.Error(codes.NotFound, "")
  4633  	}
  4634  	return s, nil
  4635  }
  4636  
  4637  func testStreamClientInterceptor(t *testing.T, e env) {
  4638  	te := newTest(t, e)
  4639  	te.streamClientInt = failOkayStream
  4640  	te.startServer(&testServer{security: e.security})
  4641  	defer te.tearDown()
  4642  
  4643  	tc := testpb.NewTestServiceClient(te.clientConn())
  4644  	respParam := []*testpb.ResponseParameters{
  4645  		{
  4646  			Size: int32(1),
  4647  		},
  4648  	}
  4649  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  4650  	if err != nil {
  4651  		t.Fatal(err)
  4652  	}
  4653  	req := &testpb.StreamingOutputCallRequest{
  4654  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4655  		ResponseParameters: respParam,
  4656  		Payload:            payload,
  4657  	}
  4658  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4659  	defer cancel()
  4660  	if _, err := tc.StreamingOutputCall(ctx, req); status.Code(err) != codes.NotFound {
  4661  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  4662  	}
  4663  }
  4664  
  4665  func (s) TestUnaryServerInterceptor(t *testing.T) {
  4666  	for _, e := range listTestEnv() {
  4667  		testUnaryServerInterceptor(t, e)
  4668  	}
  4669  }
  4670  
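        // errInjector is a unary server interceptor that rejects every RPC with
        // PermissionDenied without invoking the handler.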
  4671  func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
  4672  	return nil, status.Error(codes.PermissionDenied, "")
  4673  }
  4674  
  4675  func testUnaryServerInterceptor(t *testing.T, e env) {
  4676  	te := newTest(t, e)
  4677  	te.unaryServerInt = errInjector
  4678  	te.startServer(&testServer{security: e.security})
  4679  	defer te.tearDown()
  4680  
  4681  	tc := testpb.NewTestServiceClient(te.clientConn())
  4682  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4683  	defer cancel()
  4684  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied {
  4685  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  4686  	}
  4687  }
  4688  
  4689  func (s) TestStreamServerInterceptor(t *testing.T) {
  4690  	for _, e := range listTestEnv() {
  4691  		// TODO(bradfitz): Temporarily skip this env due to #619.
  4692  		if e.name == "handler-tls" {
  4693  			continue
  4694  		}
  4695  		testStreamServerInterceptor(t, e)
  4696  	}
  4697  }
  4698  
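        // fullDuplexOnly is a stream server interceptor that only lets
        // FullDuplexCall reach its handler.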
  4699  func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  4700  	if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" {
  4701  		return handler(srv, ss)
  4702  	}
  4703  	// Reject the other methods.
  4704  	return status.Error(codes.PermissionDenied, "")
  4705  }
  4706  
  4707  func testStreamServerInterceptor(t *testing.T, e env) {
  4708  	te := newTest(t, e)
  4709  	te.streamServerInt = fullDuplexOnly
  4710  	te.startServer(&testServer{security: e.security})
  4711  	defer te.tearDown()
  4712  
  4713  	tc := testpb.NewTestServiceClient(te.clientConn())
  4714  	respParam := []*testpb.ResponseParameters{
  4715  		{
  4716  			Size: int32(1),
  4717  		},
  4718  	}
  4719  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  4720  	if err != nil {
  4721  		t.Fatal(err)
  4722  	}
  4723  	req := &testpb.StreamingOutputCallRequest{
  4724  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4725  		ResponseParameters: respParam,
  4726  		Payload:            payload,
  4727  	}
  4728  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4729  	defer cancel()
  4730  	s1, err := tc.StreamingOutputCall(ctx, req)
  4731  	if err != nil {
  4732  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, <nil>", tc, err)
  4733  	}
  4734  	if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied {
  4735  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  4736  	}
  4737  	s2, err := tc.FullDuplexCall(ctx)
  4738  	if err != nil {
  4739  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4740  	}
  4741  	if err := s2.Send(req); err != nil {
  4742  		t.Fatalf("%v.Send(_) = %v, want <nil>", s2, err)
  4743  	}
  4744  	if _, err := s2.Recv(); err != nil {
  4745  		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", s2, err)
  4746  	}
  4747  }
  4748  
  4749  // funcServer implements methods of TestServiceServer using funcs,
  4750  // similar to an http.HandlerFunc.
  4751  // Any unimplemented method will crash. Tests implement the method(s)
  4752  // they need.
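        //
        // A minimal sketch of the pattern used by the tests below:
        //
        //	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
        //		return &testpb.SimpleResponse{}, nil
        //	}}
        //	te.startServer(ts)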
  4753  type funcServer struct {
  4754  	testpb.TestServiceServer
  4755  	unaryCall          func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
  4756  	streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error
  4757  	fullDuplexCall     func(stream testpb.TestService_FullDuplexCallServer) error
  4758  }
  4759  
  4760  func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  4761  	return s.unaryCall(ctx, in)
  4762  }
  4763  
  4764  func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
  4765  	return s.streamingInputCall(stream)
  4766  }
  4767  
  4768  func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
  4769  	return s.fullDuplexCall(stream)
  4770  }
  4771  
  4772  func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) {
  4773  	for _, e := range listTestEnv() {
  4774  		testClientRequestBodyErrorUnexpectedEOF(t, e)
  4775  	}
  4776  }
  4777  
  4778  func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) {
  4779  	te := newTest(t, e)
  4780  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  4781  		errUnexpectedCall := errors.New("unexpected call func server method")
  4782  		t.Error(errUnexpectedCall)
  4783  		return nil, errUnexpectedCall
  4784  	}}
  4785  	te.startServer(ts)
  4786  	defer te.tearDown()
  4787  	te.withServerTester(func(st *serverTester) {
  4788  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
  4789  		// Say we have 5 bytes coming, but set the END_STREAM flag:
  4790  		st.writeData(1, true, []byte{0, 0, 0, 0, 5})
  4791  		st.wantAnyFrame() // wait for server to crash (it used to crash)
  4792  	})
  4793  }
  4794  
  4795  func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) {
  4796  	for _, e := range listTestEnv() {
  4797  		testClientRequestBodyErrorCloseAfterLength(t, e)
  4798  	}
  4799  }
  4800  
  4801  func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) {
  4802  	te := newTest(t, e)
  4803  	te.declareLogNoise("Server.processUnaryRPC failed to write status")
  4804  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  4805  		errUnexpectedCall := errors.New("unexpected call func server method")
  4806  		t.Error(errUnexpectedCall)
  4807  		return nil, errUnexpectedCall
  4808  	}}
  4809  	te.startServer(ts)
  4810  	defer te.tearDown()
  4811  	te.withServerTester(func(st *serverTester) {
  4812  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
  4813  		// Say we're sending 5 bytes, but then close the connection instead.
  4814  		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
  4815  		st.cc.Close()
  4816  	})
  4817  }
  4818  
  4819  func (s) TestClientRequestBodyErrorCancel(t *testing.T) {
  4820  	for _, e := range listTestEnv() {
  4821  		testClientRequestBodyErrorCancel(t, e)
  4822  	}
  4823  }
  4824  
  4825  func testClientRequestBodyErrorCancel(t *testing.T, e env) {
  4826  	te := newTest(t, e)
  4827  	gotCall := make(chan bool, 1)
  4828  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  4829  		gotCall <- true
  4830  		return new(testpb.SimpleResponse), nil
  4831  	}}
  4832  	te.startServer(ts)
  4833  	defer te.tearDown()
  4834  	te.withServerTester(func(st *serverTester) {
  4835  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
  4836  		// Say we have 5 bytes coming, but cancel it instead.
  4837  		st.writeRSTStream(1, http2.ErrCodeCancel)
  4838  		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
  4839  
  4840  		// Verify we didn't get a call yet.
  4841  		select {
  4842  		case <-gotCall:
  4843  			t.Fatal("unexpected call")
  4844  		default:
  4845  		}
  4846  
  4847  		// And now send an uncanceled (but still invalid) request, just to get a response.
  4848  		st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall", false)
  4849  		st.writeData(3, true, []byte{0, 0, 0, 0, 0})
  4850  		<-gotCall
  4851  		st.wantAnyFrame()
  4852  	})
  4853  }
  4854  
  4855  func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) {
  4856  	for _, e := range listTestEnv() {
  4857  		testClientRequestBodyErrorCancelStreamingInput(t, e)
  4858  	}
  4859  }
  4860  
  4861  func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) {
  4862  	te := newTest(t, e)
  4863  	recvErr := make(chan error, 1)
  4864  	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
  4865  		_, err := stream.Recv()
  4866  		recvErr <- err
  4867  		return nil
  4868  	}}
  4869  	te.startServer(ts)
  4870  	defer te.tearDown()
  4871  	te.withServerTester(func(st *serverTester) {
  4872  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
  4873  		// Say we have 5 bytes coming, but cancel it instead.
  4874  		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
  4875  		st.writeRSTStream(1, http2.ErrCodeCancel)
  4876  
  4877  		var got error
  4878  		select {
  4879  		case got = <-recvErr:
  4880  		case <-time.After(3 * time.Second):
  4881  			t.Fatal("timeout waiting for error")
  4882  		}
  4883  		if status.Code(got) != codes.Canceled {
  4884  			t.Errorf("error = %#v; want error code %s", got, codes.Canceled)
  4885  		}
  4886  	})
  4887  }
  4888  
  4889  func (s) TestClientInitialHeaderEndStream(t *testing.T) {
  4890  	for _, e := range listTestEnv() {
  4891  		if e.httpHandler {
  4892  			continue
  4893  		}
  4894  		testClientInitialHeaderEndStream(t, e)
  4895  	}
  4896  }
  4897  
  4898  func testClientInitialHeaderEndStream(t *testing.T, e env) {
  4899  	// To ensure RST_STREAM is sent in response to the illegal data write and
  4900  	// not a normal stream close.
  4901  	frameCheckingDone := make(chan struct{})
  4902  	// To ensure the test goroutine does not end before the RPC handler performs
  4903  	// its error checking.
  4904  	handlerDone := make(chan struct{})
  4905  	te := newTest(t, e)
  4906  	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
  4907  		defer close(handlerDone)
  4908  		// Block until the serverTester has received RST_STREAM. This ensures the
  4909  		// server has closed the stream before stream.Recv().
  4910  		<-frameCheckingDone
  4911  		data, err := stream.Recv()
  4912  		if err == nil {
  4913  			t.Errorf("unexpected data received in func server method: '%v'", data)
  4914  		} else if status.Code(err) != codes.Canceled {
  4915  			t.Errorf("expected canceled error, instead received '%v'", err)
  4916  		}
  4917  		return nil
  4918  	}}
  4919  	te.startServer(ts)
  4920  	defer te.tearDown()
  4921  	te.withServerTester(func(st *serverTester) {
  4922  		// Send headers with the END_STREAM flag set, but then write data.
  4923  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", true)
  4924  		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
  4925  		st.wantAnyFrame()
  4926  		st.wantAnyFrame()
  4927  		st.wantRSTStream(http2.ErrCodeStreamClosed)
  4928  		close(frameCheckingDone)
  4929  		<-handlerDone
  4930  	})
  4931  }
  4932  
  4933  func (s) TestClientSendDataAfterCloseSend(t *testing.T) {
  4934  	for _, e := range listTestEnv() {
  4935  		if e.httpHandler {
  4936  			continue
  4937  		}
  4938  		testClientSendDataAfterCloseSend(t, e)
  4939  	}
  4940  }
  4941  
  4942  func testClientSendDataAfterCloseSend(t *testing.T, e env) {
  4943  	// To ensure RST_STREAM is sent for the illegal data write prior to
  4944  	// execution of the RPC handler.
  4945  	frameCheckingDone := make(chan struct{})
  4946  	// To ensure the test goroutine does not end before the RPC handler performs
  4947  	// its error checking.
  4948  	handlerDone := make(chan struct{})
  4949  	te := newTest(t, e)
  4950  	ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error {
  4951  		defer close(handlerDone)
  4952  		// Block until the serverTester has received RST_STREAM. This ensures the
  4953  		// server has closed the stream before stream.Recv().
  4954  		<-frameCheckingDone
  4955  		for {
  4956  			_, err := stream.Recv()
  4957  			if err == io.EOF {
  4958  				break
  4959  			}
  4960  			if err != nil {
  4961  				if status.Code(err) != codes.Canceled {
  4962  					t.Errorf("expected canceled error, instead received '%v'", err)
  4963  				}
  4964  				break
  4965  			}
  4966  		}
  4967  		if err := stream.SendMsg(nil); err == nil {
  4968  			t.Error("expected error sending message on stream after stream closed due to illegal data")
  4969  		} else if status.Code(err) != codes.Internal {
  4970  			t.Errorf("expected internal error, instead received '%v'", err)
  4971  		}
  4972  		return nil
  4973  	}}
  4974  	te.startServer(ts)
  4975  	defer te.tearDown()
  4976  	te.withServerTester(func(st *serverTester) {
  4977  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
  4978  		// Send data with END_STREAM flag, but then write more data.
  4979  		st.writeData(1, true, []byte{0, 0, 0, 0, 0})
  4980  		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
  4981  		st.wantAnyFrame()
  4982  		st.wantAnyFrame()
  4983  		st.wantRSTStream(http2.ErrCodeStreamClosed)
  4984  		close(frameCheckingDone)
  4985  		<-handlerDone
  4986  	})
  4987  }
  4988  
  4989  func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) {
  4990  	for _, e := range listTestEnv() {
  4991  		if e.httpHandler {
  4992  			// httpHandler write won't be blocked on flow control window.
  4993  			continue
  4994  		}
  4995  		testClientResourceExhaustedCancelFullDuplex(t, e)
  4996  	}
  4997  }
  4998  
  4999  func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
  5000  	te := newTest(t, e)
  5001  	recvErr := make(chan error, 1)
  5002  	ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
  5003  		defer close(recvErr)
  5004  		_, err := stream.Recv()
  5005  		if err != nil {
  5006  			return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want <nil>", err)
  5007  		}
  5008  		// Create a payload whose encoded response exceeds the client's receive message size limit.
  5009  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10)
  5010  		if err != nil {
  5011  			return err
  5012  		}
  5013  		resp := &testpb.StreamingOutputCallResponse{
  5014  			Payload: payload,
  5015  		}
  5016  		ce := make(chan error, 1)
  5017  		go func() {
  5018  			var err error
  5019  			for {
  5020  				if err = stream.Send(resp); err != nil {
  5021  					break
  5022  				}
  5023  			}
  5024  			ce <- err
  5025  		}()
  5026  		select {
  5027  		case err = <-ce:
  5028  		case <-time.After(10 * time.Second):
  5029  			err = errors.New("10s timeout reached")
  5030  		}
  5031  		recvErr <- err
  5032  		return err
  5033  	}}
  5034  	te.startServer(ts)
  5035  	defer te.tearDown()
  5036  	// Set a low limit on the client's receive message size so the client errors
  5037  	// with ResourceExhausted when the server sends a larger message.
  5038  	te.maxClientReceiveMsgSize = newInt(10)
  5039  	cc := te.clientConn()
  5040  	tc := testpb.NewTestServiceClient(cc)
  5041  
  5042  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5043  	defer cancel()
  5044  	stream, err := tc.FullDuplexCall(ctx)
  5045  	if err != nil {
  5046  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  5047  	}
  5048  	req := &testpb.StreamingOutputCallRequest{}
  5049  	if err := stream.Send(req); err != nil {
  5050  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  5051  	}
  5052  	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
  5053  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  5054  	}
  5055  	err = <-recvErr
  5056  	if status.Code(err) != codes.Canceled {
  5057  		t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled)
  5058  	}
  5059  }
  5060  
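        // clientFailCreds implements credentials.TransportCredentials; its client
        // handshake always fails with a fatal error, while the server side is a
        // pass-through.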
  5061  type clientFailCreds struct{}
  5062  
  5063  func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  5064  	return rawConn, nil, nil
  5065  }
  5066  func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  5067  	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
  5068  }
  5069  func (c *clientFailCreds) Info() credentials.ProtocolInfo {
  5070  	return credentials.ProtocolInfo{}
  5071  }
  5072  func (c *clientFailCreds) Clone() credentials.TransportCredentials {
  5073  	return c
  5074  }
  5075  func (c *clientFailCreds) OverrideServerName(s string) error {
  5076  	return nil
  5077  }
  5078  
  5079  // This test makes sure that failfast RPCs fail if client handshake fails with
  5080  // fatal errors.
  5081  func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
  5082  	lis, err := net.Listen("tcp", "localhost:0")
  5083  	if err != nil {
  5084  		t.Fatalf("Failed to listen: %v", err)
  5085  	}
  5086  	defer lis.Close()
  5087  
  5088  	cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
  5089  	if err != nil {
  5090  		t.Fatalf("grpc.Dial(_) = %v", err)
  5091  	}
  5092  	defer cc.Close()
  5093  
  5094  	tc := testpb.NewTestServiceClient(cc)
  5095  	// This unary call should fail, but not time out.
  5096  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
  5097  	defer cancel()
  5098  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
  5099  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
  5100  	}
  5101  }
  5102  
  5103  func (s) TestFlowControlLogicalRace(t *testing.T) {
  5104  	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
  5105  	// and other flow control bugs.
  5106  
  5107  	const (
  5108  		itemCount   = 100
  5109  		itemSize    = 1 << 10
  5110  		recvCount   = 2
  5111  		maxFailures = 3
  5112  
  5113  		requestTimeout = time.Second * 5
  5114  	)
  5115  
  5116  	requestCount := 10000
  5117  	if raceMode {
  5118  		requestCount = 1000
  5119  	}
  5120  
  5121  	lis, err := net.Listen("tcp", "localhost:0")
  5122  	if err != nil {
  5123  		t.Fatalf("Failed to listen: %v", err)
  5124  	}
  5125  	defer lis.Close()
  5126  
  5127  	s := grpc.NewServer()
  5128  	testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{
  5129  		itemCount: itemCount,
  5130  		itemSize:  itemSize,
  5131  	})
  5132  	defer s.Stop()
  5133  
  5134  	go s.Serve(lis)
  5135  
  5136  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
  5137  	if err != nil {
  5138  		t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err)
  5139  	}
  5140  	defer cc.Close()
  5141  	cl := testpb.NewTestServiceClient(cc)
  5142  
  5143  	failures := 0
  5144  	for i := 0; i < requestCount; i++ {
  5145  		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
  5146  		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
  5147  		if err != nil {
  5148  			t.Fatalf("StreamingOutputCall; err = %q", err)
  5149  		}
  5150  
  5151  		j := 0
  5152  	loop:
  5153  		for ; j < recvCount; j++ {
  5154  			_, err := output.Recv()
  5155  			if err != nil {
  5156  				if err == io.EOF {
  5157  					break loop
  5158  				}
  5159  				switch status.Code(err) {
  5160  				case codes.DeadlineExceeded:
  5161  					break loop
  5162  				default:
  5163  					t.Fatalf("Recv; err = %q", err)
  5164  				}
  5165  			}
  5166  		}
  5167  		cancel()
  5168  		<-ctx.Done()
  5169  
  5170  		if j < recvCount {
  5171  			t.Errorf("got %d responses to request %d", j, i)
  5172  			failures++
  5173  			if failures >= maxFailures {
  5174  				// Continue past the first failure to see if the connection is
  5175  				// entirely broken, or if only a single RPC was affected.
  5176  				break
  5177  			}
  5178  		}
  5179  	}
  5180  }
  5181  
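        // flowControlLogicalRaceServer streams itemCount responses of itemSize
        // bytes each, regardless of how many the client actually reads.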
  5182  type flowControlLogicalRaceServer struct {
  5183  	testpb.TestServiceServer
  5184  
  5185  	itemSize  int
  5186  	itemCount int
  5187  }
  5188  
  5189  func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error {
  5190  	for i := 0; i < s.itemCount; i++ {
  5191  		err := srv.Send(&testpb.StreamingOutputCallResponse{
  5192  			Payload: &testpb.Payload{
  5193  				// Sending a large stream of data which the client rejects
  5194  				// helps to trigger some types of flow control bugs.
  5195  				//
  5196  				// Reallocating memory here is inefficient, but the stress it
  5197  				// puts on the GC leads to more frequent flow control
  5198  				// failures. The GC likely causes more variety in the
  5199  				// goroutine scheduling orders.
  5200  				Body: bytes.Repeat([]byte("a"), s.itemSize),
  5201  			},
  5202  		})
  5203  		if err != nil {
  5204  			return err
  5205  		}
  5206  	}
  5207  	return nil
  5208  }
  5209  
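        // lockingWriter is an io.Writer whose destination can be swapped at
        // runtime; a mutex guards Write against concurrent setWriter calls.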
  5210  type lockingWriter struct {
  5211  	mu sync.Mutex
  5212  	w  io.Writer
  5213  }
  5214  
  5215  func (lw *lockingWriter) Write(p []byte) (n int, err error) {
  5216  	lw.mu.Lock()
  5217  	defer lw.mu.Unlock()
  5218  	return lw.w.Write(p)
  5219  }
  5220  
  5221  func (lw *lockingWriter) setWriter(w io.Writer) {
  5222  	lw.mu.Lock()
  5223  	defer lw.mu.Unlock()
  5224  	lw.w = w
  5225  }
  5226  
  5227  var testLogOutput = &lockingWriter{w: os.Stderr}
  5228  
  5229  // awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to
  5230  // terminate, if they're still running. Those goroutines spam the logs with
  5231  // the message below; we wait for it while our log filter is still active.
  5232  // Otherwise the "defer restore()" at the top of various test functions
  5233  // restores the unfiltered output first, and the goroutines spam it.
  5234  func awaitNewConnLogOutput() {
  5235  	awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry")
  5236  }
  5237  
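        // awaitLogOutput waits up to maxWait for phrase to appear in the filtered
        // log output, returning early once it does.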
  5238  func awaitLogOutput(maxWait time.Duration, phrase string) {
  5239  	pb := []byte(phrase)
  5240  
  5241  	timer := time.NewTimer(maxWait)
  5242  	defer timer.Stop()
  5243  	wakeup := make(chan bool, 1)
  5244  	for {
  5245  		if logOutputHasContents(pb, wakeup) {
  5246  			return
  5247  		}
  5248  		select {
  5249  		case <-timer.C:
  5250  			// Too slow. Oh well.
  5251  			return
  5252  		case <-wakeup:
  5253  		}
  5254  	}
  5255  }
  5256  
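        // logOutputHasContents reports whether the filtered log buffer contains v.
        // If it does not, wakeup is registered to be signaled on the next write.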
  5257  func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
  5258  	testLogOutput.mu.Lock()
  5259  	defer testLogOutput.mu.Unlock()
  5260  	fw, ok := testLogOutput.w.(*filterWriter)
  5261  	if !ok {
  5262  		return false
  5263  	}
  5264  	fw.mu.Lock()
  5265  	defer fw.mu.Unlock()
  5266  	if bytes.Contains(fw.buf.Bytes(), v) {
  5267  		return true
  5268  	}
  5269  	fw.wakeup = wakeup
  5270  	return false
  5271  }
  5272  
  5273  var verboseLogs = flag.Bool("verbose_logs", false, "show all log output, without filtering")
  5274  
  5275  func noop() {}
  5276  
  5277  // declareLogNoise declares that t is expected to emit the following noisy
  5278  // phrases, even on success. Those phrases will be filtered from log output and
  5279  // only be shown if *verbose_logs or t ends up failing. The returned restore
  5280  // function should be called with defer to be run before the test ends.
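        //
        // A minimal usage sketch:
        //
        //	defer declareLogNoise(t, "grpc: the connection is closing")()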
  5281  func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
  5282  	if *verboseLogs {
  5283  		return noop
  5284  	}
  5285  	fw := &filterWriter{dst: os.Stderr, filter: phrases}
  5286  	testLogOutput.setWriter(fw)
  5287  	return func() {
  5288  		if t.Failed() {
  5289  			fw.mu.Lock()
  5290  			defer fw.mu.Unlock()
  5291  			if fw.buf.Len() > 0 {
  5292  				t.Logf("Complete log output:\n%s", fw.buf.Bytes())
  5293  			}
  5294  		}
  5295  		testLogOutput.setWriter(os.Stderr)
  5296  	}
  5297  }
  5298  
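        // filterWriter buffers everything written to it and forwards to dst only
        // the writes that contain none of the filter phrases.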
  5299  type filterWriter struct {
  5300  	dst    io.Writer
  5301  	filter []string
  5302  
  5303  	mu     sync.Mutex
  5304  	buf    bytes.Buffer
  5305  	wakeup chan<- bool // if non-nil, gets true on write
  5306  }
  5307  
  5308  func (fw *filterWriter) Write(p []byte) (n int, err error) {
  5309  	fw.mu.Lock()
  5310  	fw.buf.Write(p)
  5311  	if fw.wakeup != nil {
  5312  		select {
  5313  		case fw.wakeup <- true:
  5314  		default:
  5315  		}
  5316  	}
  5317  	fw.mu.Unlock()
  5318  
  5319  	ps := string(p)
  5320  	for _, f := range fw.filter {
  5321  		if strings.Contains(ps, f) {
  5322  			return len(p), nil
  5323  		}
  5324  	}
  5325  	return fw.dst.Write(p)
  5326  }
  5327  
  5328  func (s) TestGRPCMethod(t *testing.T) {
  5329  	var method string
  5330  	var ok bool
  5331  
  5332  	ss := &stubserver.StubServer{
  5333  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5334  			method, ok = grpc.Method(ctx)
  5335  			return &testpb.Empty{}, nil
  5336  		},
  5337  	}
  5338  	if err := ss.Start(nil); err != nil {
  5339  		t.Fatalf("Error starting endpoint server: %v", err)
  5340  	}
  5341  	defer ss.Stop()
  5342  
  5343  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5344  	defer cancel()
  5345  
  5346  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5347  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  5348  	}
  5349  
  5350  	if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want {
  5351  		t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want)
  5352  	}
  5353  }
  5354  
  5355  // renameProtoCodec is an encoding.Codec wrapper that allows customizing the
  5356  // Name() of another codec.
  5357  type renameProtoCodec struct {
  5358  	encoding.Codec
  5359  	name string
  5360  }
  5361  
  5362  func (r *renameProtoCodec) Name() string { return r.name }
  5363  
  5364  // TestForceCodecName confirms that the ForceCodec call option sets the subtype
  5365  // in the content-type header according to the Name() of the codec provided.
  5366  func (s) TestForceCodecName(t *testing.T) {
  5367  	wantContentTypeCh := make(chan []string, 1)
  5368  	defer close(wantContentTypeCh)
  5369  
  5370  	ss := &stubserver.StubServer{
  5371  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5372  			md, ok := metadata.FromIncomingContext(ctx)
  5373  			if !ok {
  5374  				return nil, status.Errorf(codes.Internal, "no metadata in context")
  5375  			}
  5376  			if got, want := md["content-type"], <-wantContentTypeCh; !reflect.DeepEqual(got, want) {
  5377  				return nil, status.Errorf(codes.Internal, "got content-type=%q; want [%q]", got, want)
  5378  			}
  5379  			return &testpb.Empty{}, nil
  5380  		},
  5381  	}
  5382  	if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(encoding.GetCodec("proto"))}); err != nil {
  5383  		t.Fatalf("Error starting endpoint server: %v", err)
  5384  	}
  5385  	defer ss.Stop()
  5386  
  5387  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5388  	defer cancel()
  5389  
  5390  	codec := &renameProtoCodec{Codec: encoding.GetCodec("proto"), name: "some-test-name"}
  5391  	wantContentTypeCh <- []string{"application/grpc+some-test-name"}
  5392  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
  5393  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  5394  	}
  5395  
  5396  	// Confirm the name is converted to lowercase before transmitting.
  5397  	codec.name = "aNoTHeRNaME"
  5398  	wantContentTypeCh <- []string{"application/grpc+anothername"}
  5399  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
  5400  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  5401  	}
  5402  }
  5403  
  5404  func (s) TestForceServerCodec(t *testing.T) {
  5405  	ss := &stubserver.StubServer{
  5406  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5407  			return &testpb.Empty{}, nil
  5408  		},
  5409  	}
  5410  	codec := &countingProtoCodec{}
  5411  	if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(codec)}); err != nil {
  5412  		t.Fatalf("Error starting endpoint server: %v", err)
  5413  	}
  5414  	defer ss.Stop()
  5415  
  5416  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5417  	defer cancel()
  5418  
  5419  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5420  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  5421  	}
  5422  
  5423  	unmarshalCount := atomic.LoadInt32(&codec.unmarshalCount)
  5424  	const wantUnmarshalCount = 1
  5425  	if unmarshalCount != wantUnmarshalCount {
  5426  		t.Fatalf("protoCodec.unmarshalCount = %d; want %d", unmarshalCount, wantUnmarshalCount)
  5427  	}
  5428  	marshalCount := atomic.LoadInt32(&codec.marshalCount)
  5429  	const wantMarshalCount = 1
  5430  	if marshalCount != wantMarshalCount {
  5431  		t.Fatalf("protoCodec.marshalCount = %d; want %d", marshalCount, wantMarshalCount)
  5432  	}
  5433  }
  5434  
  5435  func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
  5436  	const mdkey = "somedata"
  5437  
  5438  	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
  5439  	endpoint := &stubserver.StubServer{
  5440  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5441  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
  5442  				return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
  5443  			}
  5444  			return &testpb.Empty{}, nil
  5445  		},
  5446  	}
  5447  	if err := endpoint.Start(nil); err != nil {
  5448  		t.Fatalf("Error starting endpoint server: %v", err)
  5449  	}
  5450  	defer endpoint.Stop()
  5451  
  5452  	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
  5453  	// without explicitly copying the metadata.
  5454  	proxy := &stubserver.StubServer{
  5455  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5456  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
  5457  				return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
  5458  			}
  5459  			return endpoint.Client.EmptyCall(ctx, in)
  5460  		},
  5461  	}
  5462  	if err := proxy.Start(nil); err != nil {
  5463  		t.Fatalf("Error starting proxy server: %v", err)
  5464  	}
  5465  	defer proxy.Stop()
  5466  
  5467  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  5468  	defer cancel()
  5469  	md := metadata.Pairs(mdkey, "val")
  5470  	ctx = metadata.NewOutgoingContext(ctx, md)
  5471  
  5472  	// Sanity check that endpoint properly errors when it sees mdkey.
  5473  	_, err := endpoint.Client.EmptyCall(ctx, &testpb.Empty{})
  5474  	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
  5475  		t.Fatalf("endpoint.Client.EmptyCall(_, _) = _, %v; want _, <status with Code()=Internal>", err)
  5476  	}
  5477  
  5478  	if _, err := proxy.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5479  		t.Fatal(err.Error())
  5480  	}
  5481  }
  5482  
  5483  func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
  5484  	const mdkey = "somedata"
  5485  
  5486  	// doFDC performs a FullDuplexCall with the given client and returns the
  5487  	// error from the first stream.Recv call, or nil if that error is io.EOF.
  5488  	// Calls t.Fatal if the stream cannot be established.
  5489  	doFDC := func(ctx context.Context, client testpb.TestServiceClient) error {
  5490  		stream, err := client.FullDuplexCall(ctx)
  5491  		if err != nil {
  5492  			t.Fatalf("Unwanted error: %v", err)
  5493  		}
  5494  		if _, err := stream.Recv(); err != io.EOF {
  5495  			return err
  5496  		}
  5497  		return nil
  5498  	}
  5499  
  5500  	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
  5501  	endpoint := &stubserver.StubServer{
  5502  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  5503  			ctx := stream.Context()
  5504  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
  5505  				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
  5506  			}
  5507  			return nil
  5508  		},
  5509  	}
  5510  	if err := endpoint.Start(nil); err != nil {
  5511  		t.Fatalf("Error starting endpoint server: %v", err)
  5512  	}
  5513  	defer endpoint.Stop()
  5514  
  5515  	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
  5516  	// without explicitly copying the metadata.
  5517  	proxy := &stubserver.StubServer{
  5518  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  5519  			ctx := stream.Context()
  5520  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
  5521  				return status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
  5522  			}
  5523  			return doFDC(ctx, endpoint.Client)
  5524  		},
  5525  	}
  5526  	if err := proxy.Start(nil); err != nil {
  5527  		t.Fatalf("Error starting proxy server: %v", err)
  5528  	}
  5529  	defer proxy.Stop()
  5530  
  5531  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  5532  	defer cancel()
  5533  	md := metadata.Pairs(mdkey, "val")
  5534  	ctx = metadata.NewOutgoingContext(ctx, md)
  5535  
  5536  	// Sanity check that endpoint properly errors when it sees mdkey in ctx.
  5537  	err := doFDC(ctx, endpoint.Client)
  5538  	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
  5539  		t.Fatalf("stream.Recv() = _, %v; want _, <status with Code()=Internal>", err)
  5540  	}
  5541  
  5542  	if err := doFDC(ctx, proxy.Client); err != nil {
  5543  		t.Fatalf("doFDC(_, proxy.Client) = %v; want nil", err)
  5544  	}
  5545  }
  5546  
  5547  func (s) TestStatsTagsAndTrace(t *testing.T) {
  5548  	// Data added to context by client (typically in a stats handler).
  5549  	tags := []byte{1, 5, 2, 4, 3}
  5550  	trace := []byte{5, 2, 1, 3, 4}
  5551  
  5552  	// endpoint ensures Tags() and Trace() in context match those that were added
  5553  	// by the client and returns an error if not.
  5554  	endpoint := &stubserver.StubServer{
  5555  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5556  			md, _ := metadata.FromIncomingContext(ctx)
  5557  			if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) {
  5558  				return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags)
  5559  			}
  5560  			if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) {
  5561  				return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags)
  5562  			}
  5563  			if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) {
  5564  				return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace)
  5565  			}
  5566  			if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) {
  5567  				return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace)
  5568  			}
  5569  			return &testpb.Empty{}, nil
  5570  		},
  5571  	}
  5572  	if err := endpoint.Start(nil); err != nil {
  5573  		t.Fatalf("Error starting endpoint server: %v", err)
  5574  	}
  5575  	defer endpoint.Stop()
  5576  
  5577  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  5578  	defer cancel()
  5579  
  5580  	testCases := []struct {
  5581  		ctx  context.Context
  5582  		want codes.Code
  5583  	}{
  5584  		{ctx: ctx, want: codes.Internal},
  5585  		{ctx: stats.SetTags(ctx, tags), want: codes.Internal},
  5586  		{ctx: stats.SetTrace(ctx, trace), want: codes.Internal},
  5587  		{ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal},
  5588  		{ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK},
  5589  	}
  5590  
  5591  	for _, tc := range testCases {
  5592  		_, err := endpoint.Client.EmptyCall(tc.ctx, &testpb.Empty{})
  5593  		if tc.want == codes.OK && err != nil {
  5594  			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err)
  5595  		}
  5596  		if s, ok := status.FromError(err); !ok || s.Code() != tc.want {
  5597  			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, <status with Code()=%v>", tc.ctx, err, tc.want)
  5598  		}
  5599  	}
  5600  }
  5601  
  5602  func (s) TestTapTimeout(t *testing.T) {
  5603  	sopts := []grpc.ServerOption{
  5604  		grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) {
  5605  			c, cancel := context.WithCancel(ctx)
  5606  			// Call cancel instead of setting a deadline so we can detect which error
  5607  			// occurred -- this cancellation (desired) or the client's deadline
  5608  			// expired (indicating this cancellation did not affect the RPC).
  5609  			time.AfterFunc(10*time.Millisecond, cancel)
  5610  			return c, nil
  5611  		}),
  5612  	}
  5613  
  5614  	ss := &stubserver.StubServer{
  5615  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  5616  			<-ctx.Done()
  5617  			return nil, status.Error(codes.Canceled, ctx.Err().Error())
  5618  		},
  5619  	}
  5620  	if err := ss.Start(sopts); err != nil {
  5621  		t.Fatalf("Error starting endpoint server: %v", err)
  5622  	}
  5623  	defer ss.Stop()
  5624  
  5625  	// This was known to be flaky; test several times.
  5626  	for i := 0; i < 10; i++ {
  5627  		// Set our own deadline in case the server hangs.
  5628  		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  5629  		res, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
  5630  		cancel()
  5631  		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
  5632  			t.Fatalf("ss.Client.EmptyCall(ctx, _) = %v, %v; want nil, <status with Code()=Canceled>", res, err)
  5633  		}
  5634  	}
  5635  
  5636  }
  5637  
  5638  func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
  5639  	ss := &stubserver.StubServer{
  5640  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  5641  			return status.Errorf(codes.Internal, "")
  5642  		},
  5643  	}
  5644  	sopts := []grpc.ServerOption{}
  5645  	if err := ss.Start(sopts); err != nil {
  5646  		t.Fatalf("Error starting endpoint server: %v", err)
  5647  	}
  5648  	defer ss.Stop()
  5649  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  5650  	defer cancel()
  5651  	stream, err := ss.Client.FullDuplexCall(ctx)
  5652  	if err != nil {
  5653  		t.Fatalf("Error while creating stream: %v", err)
  5654  	}
  5655  	for {
  5656  		if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil {
  5657  			time.Sleep(5 * time.Millisecond)
  5658  		} else if err == io.EOF {
  5659  			break // Success.
  5660  		} else {
  5661  			t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
  5662  		}
  5663  	}
  5664  }
  5665  
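        // windowSizeConfig holds the initial HTTP/2 stream- and connection-level
        // flow control window sizes for the server and client.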
  5666  type windowSizeConfig struct {
  5667  	serverStream int32
  5668  	serverConn   int32
  5669  	clientStream int32
  5670  	clientConn   int32
  5671  }
  5672  
  5673  func max(a, b int32) int32 {
  5674  	if a > b {
  5675  		return a
  5676  	}
  5677  	return b
  5678  }
  5679  
  5680  func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) {
  5681  	wc := windowSizeConfig{
  5682  		serverStream: 8 * 1024 * 1024,
  5683  		serverConn:   12 * 1024 * 1024,
  5684  		clientStream: 6 * 1024 * 1024,
  5685  		clientConn:   8 * 1024 * 1024,
  5686  	}
  5687  	for _, e := range listTestEnv() {
  5688  		testConfigurableWindowSize(t, e, wc)
  5689  	}
  5690  }
  5691  
  5692  func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) {
  5693  	wc := windowSizeConfig{
  5694  		serverStream: 1,
  5695  		serverConn:   1,
  5696  		clientStream: 1,
  5697  		clientConn:   1,
  5698  	}
  5699  	for _, e := range listTestEnv() {
  5700  		testConfigurableWindowSize(t, e, wc)
  5701  	}
  5702  }
  5703  
  5704  func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) {
  5705  	te := newTest(t, e)
  5706  	te.serverInitialWindowSize = wc.serverStream
  5707  	te.serverInitialConnWindowSize = wc.serverConn
  5708  	te.clientInitialWindowSize = wc.clientStream
  5709  	te.clientInitialConnWindowSize = wc.clientConn
  5710  
  5711  	te.startServer(&testServer{security: e.security})
  5712  	defer te.tearDown()
  5713  
  5714  	cc := te.clientConn()
  5715  	tc := testpb.NewTestServiceClient(cc)
  5716  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5717  	defer cancel()
  5718  	stream, err := tc.FullDuplexCall(ctx)
  5719  	if err != nil {
  5720  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  5721  	}
  5722  	numOfIter := 11
  5723  	// Set the message size to exhaust the largest of the window sizes.
  5724  	messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1)
  5725  	messageSize = max(messageSize, 64*1024)
  5726  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize)
  5727  	if err != nil {
  5728  		t.Fatal(err)
  5729  	}
  5730  	respParams := []*testpb.ResponseParameters{
  5731  		{
  5732  			Size: messageSize,
  5733  		},
  5734  	}
  5735  	req := &testpb.StreamingOutputCallRequest{
  5736  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  5737  		ResponseParameters: respParams,
  5738  		Payload:            payload,
  5739  	}
  5740  	for i := 0; i < numOfIter; i++ {
  5741  		if err := stream.Send(req); err != nil {
  5742  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  5743  		}
  5744  		if _, err := stream.Recv(); err != nil {
  5745  			t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
  5746  		}
  5747  	}
  5748  	if err := stream.CloseSend(); err != nil {
  5749  		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
  5750  	}
  5751  }
  5752  
  5753  func (s) TestWaitForReadyConnection(t *testing.T) {
  5754  	for _, e := range listTestEnv() {
  5755  		testWaitForReadyConnection(t, e)
  5756  	}
  5757  
  5758  }
  5759  
  5760  func testWaitForReadyConnection(t *testing.T, e env) {
  5761  	te := newTest(t, e)
  5762  	te.userAgent = testAppUA
  5763  	te.startServer(&testServer{security: e.security})
  5764  	defer te.tearDown()
  5765  
  5766  	cc := te.clientConn() // Non-blocking dial.
  5767  	tc := testpb.NewTestServiceClient(cc)
  5768  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
  5769  	defer cancel()
  5770  	state := cc.GetState()
  5771  	// Wait for connection to be Ready.
  5772  	for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
  5773  	}
  5774  	if state != connectivity.Ready {
  5775  		t.Fatalf("Want connection state to be Ready, got %v", state)
  5776  	}
  5777  	ctx, cancel = context.WithTimeout(context.Background(), time.Second)
  5778  	defer cancel()
  5779  	// Make a fail-fast RPC.
  5780  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5781  		t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err)
  5782  	}
  5783  }
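
        // Note that the EmptyCall above is fail-fast (the default): had the channel
        // not already been Ready, it would have failed immediately with Unavailable.
        // To queue the RPC until the channel is Ready instead, the call option is
        // passed explicitly. A sketch:
        //
        //	_, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true))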
  5784  
  5785  type errCodec struct {
  5786  	noError bool
  5787  }
  5788  
  5789  func (c *errCodec) Marshal(v interface{}) ([]byte, error) {
  5790  	if c.noError {
  5791  		return []byte{}, nil
  5792  	}
  5793  	return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12")
  5794  }
  5795  
  5796  func (c *errCodec) Unmarshal(data []byte, v interface{}) error {
  5797  	return nil
  5798  }
  5799  
  5800  func (c *errCodec) Name() string {
  5801  	return "Fermat's near-miss."
  5802  }
  5803  
  5804  type countingProtoCodec struct {
  5805  	marshalCount   int32
  5806  	unmarshalCount int32
  5807  }
  5808  
  5809  func (p *countingProtoCodec) Marshal(v interface{}) ([]byte, error) {
  5810  	atomic.AddInt32(&p.marshalCount, 1)
  5811  	vv, ok := v.(proto.Message)
  5812  	if !ok {
  5813  		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
  5814  	}
  5815  	return proto.Marshal(vv)
  5816  }
  5817  
  5818  func (p *countingProtoCodec) Unmarshal(data []byte, v interface{}) error {
  5819  	atomic.AddInt32(&p.unmarshalCount, 1)
  5820  	vv, ok := v.(proto.Message)
  5821  	if !ok {
  5822  		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
  5823  	}
  5824  	return proto.Unmarshal(data, vv)
  5825  }
  5826  
  5827  func (*countingProtoCodec) Name() string {
  5828  	return "proto"
  5829  }
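
        // countingProtoCodec satisfies encoding.Codec, so it can be plugged in for a
        // single RPC with the ForceCodec call option (or process-wide via
        // encoding.RegisterCodec). A minimal usage sketch, assuming an established
        // *grpc.ClientConn cc; the helper is illustrative and unused by the tests:
        func countMarshalsSketch(ctx context.Context, cc *grpc.ClientConn) (int32, error) {
        	codec := &countingProtoCodec{}
        	tc := testpb.NewTestServiceClient(cc)
        	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil {
        		return 0, err
        	}
        	// Exactly one request message was marshaled through our codec.
        	return atomic.LoadInt32(&codec.marshalCount), nil
        }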
  5830  
  5831  func (s) TestEncodeDoesntPanic(t *testing.T) {
  5832  	for _, e := range listTestEnv() {
  5833  		testEncodeDoesntPanic(t, e)
  5834  	}
  5835  }
  5836  
  5837  func testEncodeDoesntPanic(t *testing.T, e env) {
  5838  	te := newTest(t, e)
  5839  	erc := &errCodec{}
  5840  	te.customCodec = erc
  5841  	te.startServer(&testServer{security: e.security})
  5842  	defer te.tearDown()
  5843  	te.customCodec = nil
  5844  	tc := testpb.NewTestServiceClient(te.clientConn())
  5845  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5846  	defer cancel()
  5847  	// Failure case, should not panic.
  5848  	tc.EmptyCall(ctx, &testpb.Empty{})
  5849  	erc.noError = true
  5850  	// Passing case.
  5851  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5852  		t.Fatalf("EmptyCall(_, _) = _, %v, want _, <nil>", err)
  5853  	}
  5854  }
  5855  
  5856  func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) {
  5857  	for _, e := range listTestEnv() {
  5858  		testSvrWriteStatusEarlyWrite(t, e)
  5859  	}
  5860  }
  5861  
  5862  func testSvrWriteStatusEarlyWrite(t *testing.T, e env) {
  5863  	te := newTest(t, e)
  5864  	const smallSize = 1024
  5865  	const largeSize = 2048
  5866  	const extraLargeSize = 4096
  5867  	te.maxServerReceiveMsgSize = newInt(largeSize)
  5868  	te.maxServerSendMsgSize = newInt(largeSize)
  5869  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  5870  	if err != nil {
  5871  		t.Fatal(err)
  5872  	}
  5873  	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
  5874  	if err != nil {
  5875  		t.Fatal(err)
  5876  	}
  5877  	te.startServer(&testServer{security: e.security})
  5878  	defer te.tearDown()
  5879  	tc := testpb.NewTestServiceClient(te.clientConn())
  5880  	respParam := []*testpb.ResponseParameters{
  5881  		{
  5882  			Size: int32(smallSize),
  5883  		},
  5884  	}
  5885  	sreq := &testpb.StreamingOutputCallRequest{
  5886  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  5887  		ResponseParameters: respParam,
  5888  		Payload:            extraLargePayload,
  5889  	}
  5890  	// Test recv case: server receives a message larger than maxServerReceiveMsgSize.
  5891  	stream, err := tc.FullDuplexCall(te.ctx)
  5892  	if err != nil {
  5893  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  5894  	}
  5895  	if err = stream.Send(sreq); err != nil {
  5896  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  5897  	}
  5898  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  5899  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  5900  	}
  5901  	// Test send case: server sends a message larger than maxServerSendMsgSize.
  5902  	sreq.Payload = smallPayload
  5903  	respParam[0].Size = int32(extraLargeSize)
  5904  
  5905  	stream, err = tc.FullDuplexCall(te.ctx)
  5906  	if err != nil {
  5907  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  5908  	}
  5909  	if err = stream.Send(sreq); err != nil {
  5910  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  5911  	}
  5912  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  5913  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  5914  	}
  5915  }
  5916  
  5917  // Functions whose names end in TD should be deleted after the old service
  5918  // config API is deprecated and removed.
  5919  func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) {
  5920  	te := newTest(t, e)
  5921  	// The channel is buffered because we write before we read.
  5922  	ch := make(chan grpc.ServiceConfig, 1)
  5923  	te.sc = ch
  5924  	te.userAgent = testAppUA
  5925  	te.declareLogNoise(
  5926  		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
  5927  		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
  5928  		"grpc: addrConn.resetTransport failed to create client transport: connection error",
  5929  		"Failed to dial : context canceled; please retry.",
  5930  	)
  5931  	return te, ch
  5932  }
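
        // For comparison, the newer service config API delivers the same options as
        // JSON through the resolver rather than through a channel of
        // grpc.ServiceConfig values. A sketch, assuming a manual resolver r as used
        // elsewhere in this file:
        //
        //	r.UpdateState(resolver.State{ServiceConfig: parseCfg(r, `{
        //	    "methodConfig": [{
        //	        "name": [{"service": "grpc.testing.TestService", "method": "EmptyCall"}],
        //	        "waitForReady": true,
        //	        "timeout": "0.001s"
        //	    }]
        //	}`)})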
  5933  
  5934  func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) {
  5935  	for _, e := range listTestEnv() {
  5936  		testGetMethodConfigTD(t, e)
  5937  	}
  5938  }
  5939  
  5940  func testGetMethodConfigTD(t *testing.T, e env) {
  5941  	te, ch := testServiceConfigSetupTD(t, e)
  5942  	defer te.tearDown()
  5943  
  5944  	mc1 := grpc.MethodConfig{
  5945  		WaitForReady: newBool(true),
  5946  		Timeout:      newDuration(time.Millisecond),
  5947  	}
  5948  	mc2 := grpc.MethodConfig{WaitForReady: newBool(false)}
  5949  	m := make(map[string]grpc.MethodConfig)
  5950  	m["/grpc.testing.TestService/EmptyCall"] = mc1
  5951  	m["/grpc.testing.TestService/"] = mc2
  5952  	sc := grpc.ServiceConfig{
  5953  		Methods: m,
  5954  	}
  5955  	ch <- sc
  5956  
  5957  	cc := te.clientConn()
  5958  	tc := testpb.NewTestServiceClient(cc)
  5959  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5960  	defer cancel()
  5961  	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
  5962  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  5963  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  5964  	}
  5965  
  5966  	m = make(map[string]grpc.MethodConfig)
  5967  	m["/grpc.testing.TestService/UnaryCall"] = mc1
  5968  	m["/grpc.testing.TestService/"] = mc2
  5969  	sc = grpc.ServiceConfig{
  5970  		Methods: m,
  5971  	}
  5972  	ch <- sc
  5973  	// Wait for the new service config to propagate.
  5974  	for {
  5975  		if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  5976  			break
  5977  		}
  5978  	}
  5979  	// The following RPCs are expected to become fail-fast.
  5980  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
  5981  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
  5982  	}
  5983  }
  5984  
  5985  func (s) TestServiceConfigWaitForReadyTD(t *testing.T) {
  5986  	for _, e := range listTestEnv() {
  5987  		testServiceConfigWaitForReadyTD(t, e)
  5988  	}
  5989  }
  5990  
  5991  func testServiceConfigWaitForReadyTD(t *testing.T, e env) {
  5992  	te, ch := testServiceConfigSetupTD(t, e)
  5993  	defer te.tearDown()
  5994  
  5995  	// Case 1: The client API sets failfast to false and the service config sets wait_for_ready to false. The client API wins, so the RPC waits until the deadline is exceeded.
  5996  	mc := grpc.MethodConfig{
  5997  		WaitForReady: newBool(false),
  5998  		Timeout:      newDuration(time.Millisecond),
  5999  	}
  6000  	m := make(map[string]grpc.MethodConfig)
  6001  	m["/grpc.testing.TestService/EmptyCall"] = mc
  6002  	m["/grpc.testing.TestService/FullDuplexCall"] = mc
  6003  	sc := grpc.ServiceConfig{
  6004  		Methods: m,
  6005  	}
  6006  	ch <- sc
  6007  
  6008  	cc := te.clientConn()
  6009  	tc := testpb.NewTestServiceClient(cc)
  6010  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6011  	defer cancel()
  6012  	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
  6013  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  6014  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  6015  	}
  6016  	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  6017  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  6018  	}
  6019  
  6020  	// Generate a service config update.
  6021  	// Case 2: The client API does not set failfast and the service config sets wait_for_ready to true, so the RPC waits until the deadline is exceeded.
  6022  	mc.WaitForReady = newBool(true)
  6023  	m = make(map[string]grpc.MethodConfig)
  6024  	m["/grpc.testing.TestService/EmptyCall"] = mc
  6025  	m["/grpc.testing.TestService/FullDuplexCall"] = mc
  6026  	sc = grpc.ServiceConfig{
  6027  		Methods: m,
  6028  	}
  6029  	ch <- sc
  6030  
  6031  	// Wait for the new service config to take effect.
  6032  	mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
  6033  	for {
  6034  		if !*mc.WaitForReady {
  6035  			time.Sleep(100 * time.Millisecond)
  6036  			mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall")
  6037  			continue
  6038  		}
  6039  		break
  6040  	}
  6041  	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
  6042  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  6043  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  6044  	}
  6045  	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
  6046  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  6047  	}
  6048  }
  6049  
  6050  func (s) TestServiceConfigTimeoutTD(t *testing.T) {
  6051  	for _, e := range listTestEnv() {
  6052  		testServiceConfigTimeoutTD(t, e)
  6053  	}
  6054  }
  6055  
  6056  func testServiceConfigTimeoutTD(t *testing.T, e env) {
  6057  	te, ch := testServiceConfigSetupTD(t, e)
  6058  	defer te.tearDown()
  6059  
  6060  	// Case 1: The client API sets a 1ns timeout and the service config sets a 1hr timeout. The effective timeout is 1ns (the minimum of the two), so the RPC waits until the deadline is exceeded.
  6061  	mc := grpc.MethodConfig{
  6062  		Timeout: newDuration(time.Hour),
  6063  	}
  6064  	m := make(map[string]grpc.MethodConfig)
  6065  	m["/grpc.testing.TestService/EmptyCall"] = mc
  6066  	m["/grpc.testing.TestService/FullDuplexCall"] = mc
  6067  	sc := grpc.ServiceConfig{
  6068  		Methods: m,
  6069  	}
  6070  	ch <- sc
  6071  
  6072  	cc := te.clientConn()
  6073  	tc := testpb.NewTestServiceClient(cc)
  6074  	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
  6075  	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
  6076  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  6077  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  6078  	}
  6079  	cancel()
  6080  	ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond)
  6081  	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  6082  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  6083  	}
  6084  	cancel()
  6085  
  6086  	// Generate a service config update.
  6087  	// Case 2: The client API sets a 1hr timeout and the service config sets a 1ns timeout. The effective timeout is again 1ns (the minimum of the two), so the RPC waits until the deadline is exceeded.
  6088  	mc.Timeout = newDuration(time.Nanosecond)
  6089  	m = make(map[string]grpc.MethodConfig)
  6090  	m["/grpc.testing.TestService/EmptyCall"] = mc
  6091  	m["/grpc.testing.TestService/FullDuplexCall"] = mc
  6092  	sc = grpc.ServiceConfig{
  6093  		Methods: m,
  6094  	}
  6095  	ch <- sc
  6096  
  6097  	// Wait for the new service config to take effect.
  6098  	mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
  6099  	for {
  6100  		if *mc.Timeout != time.Nanosecond {
  6101  			time.Sleep(100 * time.Millisecond)
  6102  			mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall")
  6103  			continue
  6104  		}
  6105  		break
  6106  	}
  6107  
  6108  	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
  6109  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  6110  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  6111  	}
  6112  	cancel()
  6113  
  6114  	ctx, cancel = context.WithTimeout(context.Background(), time.Hour)
  6115  	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
  6116  		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
  6117  	}
  6118  	cancel()
  6119  }
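
        // The rule exercised above: the effective per-RPC deadline is the minimum of
        // the context deadline and the service-config timeout. A sketch of the
        // selection (a hypothetical helper, not part of the gRPC API):
        func effectiveTimeoutSketch(ctxTimeout, scTimeout time.Duration) time.Duration {
        	if ctxTimeout < scTimeout {
        		return ctxTimeout
        	}
        	return scTimeout
        }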
  6120  
  6121  func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) {
  6122  	for _, e := range listTestEnv() {
  6123  		testServiceConfigMaxMsgSizeTD(t, e)
  6124  	}
  6125  }
  6126  
  6127  func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) {
  6128  	// Setting up values and objects shared across all test cases.
  6129  	const smallSize = 1
  6130  	const largeSize = 1024
  6131  	const extraLargeSize = 2048
  6132  
  6133  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  6134  	if err != nil {
  6135  		t.Fatal(err)
  6136  	}
  6137  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  6138  	if err != nil {
  6139  		t.Fatal(err)
  6140  	}
  6141  	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
  6142  	if err != nil {
  6143  		t.Fatal(err)
  6144  	}
  6145  
  6146  	mc := grpc.MethodConfig{
  6147  		MaxReqSize:  newInt(extraLargeSize),
  6148  		MaxRespSize: newInt(extraLargeSize),
  6149  	}
  6150  
  6151  	m := make(map[string]grpc.MethodConfig)
  6152  	m["/grpc.testing.TestService/UnaryCall"] = mc
  6153  	m["/grpc.testing.TestService/FullDuplexCall"] = mc
  6154  	sc := grpc.ServiceConfig{
  6155  		Methods: m,
  6156  	}
  6157  	// Case 1: The service config sets maxReqSize to 2048 (send) and maxRespSize to 2048 (recv).
  6158  	te1, ch1 := testServiceConfigSetupTD(t, e)
  6159  	te1.startServer(&testServer{security: e.security})
  6160  	defer te1.tearDown()
  6161  
  6162  	ch1 <- sc
  6163  	tc := testpb.NewTestServiceClient(te1.clientConn())
  6164  
  6165  	req := &testpb.SimpleRequest{
  6166  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6167  		ResponseSize: int32(extraLargeSize),
  6168  		Payload:      smallPayload,
  6169  	}
  6170  
  6171  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6172  	defer cancel()
  6173  	// Test for unary RPC recv.
  6174  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  6175  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  6176  	}
  6177  
  6178  	// Test for unary RPC send.
  6179  	req.Payload = extraLargePayload
  6180  	req.ResponseSize = int32(smallSize)
  6181  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  6182  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  6183  	}
  6184  
  6185  	// Test for streaming RPC recv.
  6186  	respParam := []*testpb.ResponseParameters{
  6187  		{
  6188  			Size: int32(extraLargeSize),
  6189  		},
  6190  	}
  6191  	sreq := &testpb.StreamingOutputCallRequest{
  6192  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  6193  		ResponseParameters: respParam,
  6194  		Payload:            smallPayload,
  6195  	}
  6196  	stream, err := tc.FullDuplexCall(te1.ctx)
  6197  	if err != nil {
  6198  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6199  	}
  6200  	if err := stream.Send(sreq); err != nil {
  6201  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6202  	}
  6203  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  6204  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  6205  	}
  6206  
  6207  	// Test for streaming RPC send.
  6208  	respParam[0].Size = int32(smallSize)
  6209  	sreq.Payload = extraLargePayload
  6210  	stream, err = tc.FullDuplexCall(te1.ctx)
  6211  	if err != nil {
  6212  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6213  	}
  6214  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  6215  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  6216  	}
  6217  
  6218  	// Case 2: The client API sets maxReqSize to 1024 (send) and maxRespSize to 1024 (recv); the service config sets both to 2048. The smaller client limits win.
  6219  	te2, ch2 := testServiceConfigSetupTD(t, e)
  6220  	te2.maxClientReceiveMsgSize = newInt(1024)
  6221  	te2.maxClientSendMsgSize = newInt(1024)
  6222  	te2.startServer(&testServer{security: e.security})
  6223  	defer te2.tearDown()
  6224  	ch2 <- sc
  6225  	tc = testpb.NewTestServiceClient(te2.clientConn())
  6226  
  6227  	// Test for unary RPC recv.
  6228  	req.Payload = smallPayload
  6229  	req.ResponseSize = int32(largeSize)
  6230  
  6231  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  6232  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  6233  	}
  6234  
  6235  	// Test for unary RPC send.
  6236  	req.Payload = largePayload
  6237  	req.ResponseSize = int32(smallSize)
  6238  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  6239  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  6240  	}
  6241  
  6242  	// Test for streaming RPC recv.
  6243  	stream, err = tc.FullDuplexCall(te2.ctx)
  6244  	respParam[0].Size = int32(largeSize)
  6245  	sreq.Payload = smallPayload
  6246  	if err != nil {
  6247  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6248  	}
  6249  	if err := stream.Send(sreq); err != nil {
  6250  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6251  	}
  6252  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  6253  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  6254  	}
  6255  
  6256  	// Test for streaming RPC send.
  6257  	respParam[0].Size = int32(smallSize)
  6258  	sreq.Payload = largePayload
  6259  	stream, err = tc.FullDuplexCall(te2.ctx)
  6260  	if err != nil {
  6261  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6262  	}
  6263  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  6264  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  6265  	}
  6266  
  6267  	// Case 3: The client API sets maxReqSize to 4096 (send) and maxRespSize to 4096 (recv); the service config sets both to 2048. The smaller service-config limits win.
  6268  	te3, ch3 := testServiceConfigSetupTD(t, e)
  6269  	te3.maxClientReceiveMsgSize = newInt(4096)
  6270  	te3.maxClientSendMsgSize = newInt(4096)
  6271  	te3.startServer(&testServer{security: e.security})
  6272  	defer te3.tearDown()
  6273  	ch3 <- sc
  6274  	tc = testpb.NewTestServiceClient(te3.clientConn())
  6275  
  6276  	// Test for unary RPC recv.
  6277  	req.Payload = smallPayload
  6278  	req.ResponseSize = int32(largeSize)
  6279  
  6280  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  6281  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  6282  	}
  6283  
  6284  	req.ResponseSize = int32(extraLargeSize)
  6285  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  6286  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  6287  	}
  6288  
  6289  	// Test for unary RPC send.
  6290  	req.Payload = largePayload
  6291  	req.ResponseSize = int32(smallSize)
  6292  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  6293  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  6294  	}
  6295  
  6296  	req.Payload = extraLargePayload
  6297  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  6298  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  6299  	}
  6300  
  6301  	// Test for streaming RPC recv.
  6302  	stream, err = tc.FullDuplexCall(te3.ctx)
  6303  	if err != nil {
  6304  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6305  	}
  6306  	respParam[0].Size = int32(largeSize)
  6307  	sreq.Payload = smallPayload
  6308  
  6309  	if err := stream.Send(sreq); err != nil {
  6310  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6311  	}
  6312  	if _, err := stream.Recv(); err != nil {
  6313  		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
  6314  	}
  6315  
  6316  	respParam[0].Size = int32(extraLargeSize)
  6317  
  6318  	if err := stream.Send(sreq); err != nil {
  6319  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6320  	}
  6321  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  6322  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  6323  	}
  6324  
  6325  	// Test for streaming RPC send.
  6326  	respParam[0].Size = int32(smallSize)
  6327  	sreq.Payload = largePayload
  6328  	stream, err = tc.FullDuplexCall(te3.ctx)
  6329  	if err != nil {
  6330  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6331  	}
  6332  	if err := stream.Send(sreq); err != nil {
  6333  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6334  	}
  6335  	sreq.Payload = extraLargePayload
  6336  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  6337  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  6338  	}
  6339  }
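
        // The precedence exercised by the three cases above: when both the client API
        // and the service config specify a message-size limit, the smaller value wins.
        // A sketch of the rule (a hypothetical helper, not part of the gRPC API):
        func effectiveMsgSizeLimitSketch(clientAPILimit, serviceConfigLimit int) int {
        	if clientAPILimit < serviceConfigLimit {
        		return clientAPILimit
        	}
        	return serviceConfigLimit
        }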
  6340  
  6341  // TestMalformedStreamMethod starts a test server and sends an RPC with a
  6342  // malformed method name. The server should respond with an UNIMPLEMENTED status
  6343  // code in this case.
  6344  func (s) TestMalformedStreamMethod(t *testing.T) {
  6345  	const testMethod = "a-method-name-without-any-slashes"
  6346  	te := newTest(t, tcpClearRREnv)
  6347  	te.startServer(nil)
  6348  	defer te.tearDown()
  6349  
  6350  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6351  	defer cancel()
  6352  	err := te.clientConn().Invoke(ctx, testMethod, nil, nil)
  6353  	if gotCode := status.Code(err); gotCode != codes.Unimplemented {
  6354  		t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented)
  6355  	}
  6356  }
  6357  
  6358  func (s) TestMethodFromServerStream(t *testing.T) {
  6359  	const testMethod = "/package.service/method"
  6360  	e := tcpClearRREnv
  6361  	te := newTest(t, e)
  6362  	var method string
  6363  	var ok bool
  6364  	te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error {
  6365  		method, ok = grpc.MethodFromServerStream(stream)
  6366  		return nil
  6367  	}
  6368  
  6369  	te.startServer(nil)
  6370  	defer te.tearDown()
  6371  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6372  	defer cancel()
  6373  	_ = te.clientConn().Invoke(ctx, testMethod, nil, nil)
  6374  	if !ok || method != testMethod {
  6375  		t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod)
  6376  	}
  6377  }
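
        // Outside this test harness, the unknown-method handler above would be
        // installed with the UnknownServiceHandler server option. A sketch:
        //
        //	s := grpc.NewServer(grpc.UnknownServiceHandler(
        //		func(srv interface{}, stream grpc.ServerStream) error {
        //			m, _ := grpc.MethodFromServerStream(stream)
        //			return status.Errorf(codes.Unimplemented, "no handler for %q", m)
        //		}))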
  6378  
  6379  func (s) TestInterceptorCanAccessCallOptions(t *testing.T) {
  6380  	e := tcpClearRREnv
  6381  	te := newTest(t, e)
  6382  	te.startServer(&testServer{security: e.security})
  6383  	defer te.tearDown()
  6384  
  6385  	type observedOptions struct {
  6386  		headers     []*metadata.MD
  6387  		trailers    []*metadata.MD
  6388  		peer        []*peer.Peer
  6389  		creds       []credentials.PerRPCCredentials
  6390  		failFast    []bool
  6391  		maxRecvSize []int
  6392  		maxSendSize []int
  6393  		compressor  []string
  6394  		subtype     []string
  6395  	}
  6396  	var observedOpts observedOptions
  6397  	populateOpts := func(opts []grpc.CallOption) {
  6398  		for _, o := range opts {
  6399  			switch o := o.(type) {
  6400  			case grpc.HeaderCallOption:
  6401  				observedOpts.headers = append(observedOpts.headers, o.HeaderAddr)
  6402  			case grpc.TrailerCallOption:
  6403  				observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr)
  6404  			case grpc.PeerCallOption:
  6405  				observedOpts.peer = append(observedOpts.peer, o.PeerAddr)
  6406  			case grpc.PerRPCCredsCallOption:
  6407  				observedOpts.creds = append(observedOpts.creds, o.Creds)
  6408  			case grpc.FailFastCallOption:
  6409  				observedOpts.failFast = append(observedOpts.failFast, o.FailFast)
  6410  			case grpc.MaxRecvMsgSizeCallOption:
  6411  				observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize)
  6412  			case grpc.MaxSendMsgSizeCallOption:
  6413  				observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize)
  6414  			case grpc.CompressorCallOption:
  6415  				observedOpts.compressor = append(observedOpts.compressor, o.CompressorType)
  6416  			case grpc.ContentSubtypeCallOption:
  6417  				observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype)
  6418  			}
  6419  		}
  6420  	}
  6421  
  6422  	te.unaryClientInt = func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
  6423  		populateOpts(opts)
  6424  		return nil
  6425  	}
  6426  	te.streamClientInt = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
  6427  		populateOpts(opts)
  6428  		return nil, nil
  6429  	}
  6430  
  6431  	defaults := []grpc.CallOption{
  6432  		grpc.WaitForReady(true),
  6433  		grpc.MaxCallRecvMsgSize(1010),
  6434  	}
  6435  	tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...)))
  6436  
  6437  	var headers metadata.MD
  6438  	var trailers metadata.MD
  6439  	var pr peer.Peer
  6440  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6441  	defer cancel()
  6442  	tc.UnaryCall(ctx, &testpb.SimpleRequest{},
  6443  		grpc.MaxCallRecvMsgSize(100),
  6444  		grpc.MaxCallSendMsgSize(200),
  6445  		grpc.PerRPCCredentials(testPerRPCCredentials{}),
  6446  		grpc.Header(&headers),
  6447  		grpc.Trailer(&trailers),
  6448  		grpc.Peer(&pr))
  6449  	expected := observedOptions{
  6450  		failFast:    []bool{false},
  6451  		maxRecvSize: []int{1010, 100},
  6452  		maxSendSize: []int{200},
  6453  		creds:       []credentials.PerRPCCredentials{testPerRPCCredentials{}},
  6454  		headers:     []*metadata.MD{&headers},
  6455  		trailers:    []*metadata.MD{&trailers},
  6456  		peer:        []*peer.Peer{&pr},
  6457  	}
  6458  
  6459  	if !reflect.DeepEqual(expected, observedOpts) {
  6460  		t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
  6461  	}
  6462  
  6463  	observedOpts = observedOptions{} // reset
  6464  
  6465  	tc.StreamingInputCall(ctx,
  6466  		grpc.WaitForReady(false),
  6467  		grpc.MaxCallSendMsgSize(2020),
  6468  		grpc.UseCompressor("comp-type"),
  6469  		grpc.CallContentSubtype("json"))
  6470  	expected = observedOptions{
  6471  		failFast:    []bool{false, true},
  6472  		maxRecvSize: []int{1010},
  6473  		maxSendSize: []int{2020},
  6474  		compressor:  []string{"comp-type"},
  6475  		subtype:     []string{"json"},
  6476  	}
  6477  
  6478  	if !reflect.DeepEqual(expected, observedOpts) {
  6479  		t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
  6480  	}
  6481  }
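
        // Outside the harness, the client interceptors above are installed at dial
        // time. A sketch, assuming unaryInt and streamInt have the signatures used in
        // this test:
        //
        //	cc, err := grpc.Dial(addr,
        //		grpc.WithInsecure(),
        //		grpc.WithUnaryInterceptor(unaryInt),
        //		grpc.WithStreamInterceptor(streamInt))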
  6482  
  6483  func (s) TestCompressorRegister(t *testing.T) {
  6484  	for _, e := range listTestEnv() {
  6485  		testCompressorRegister(t, e)
  6486  	}
  6487  }
  6488  
  6489  func testCompressorRegister(t *testing.T, e env) {
  6490  	te := newTest(t, e)
  6491  	te.clientCompression = false
  6492  	te.serverCompression = false
  6493  	te.clientUseCompression = true
  6494  
  6495  	te.startServer(&testServer{security: e.security})
  6496  	defer te.tearDown()
  6497  	tc := testpb.NewTestServiceClient(te.clientConn())
  6498  
  6499  	// Unary call
  6500  	const argSize = 271828
  6501  	const respSize = 314159
  6502  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  6503  	if err != nil {
  6504  		t.Fatal(err)
  6505  	}
  6506  	req := &testpb.SimpleRequest{
  6507  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6508  		ResponseSize: respSize,
  6509  		Payload:      payload,
  6510  	}
  6511  	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something"))
  6512  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  6513  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  6514  	}
  6515  	// Streaming RPC
  6516  	ctx, cancel := context.WithCancel(context.Background())
  6517  	defer cancel()
  6518  	stream, err := tc.FullDuplexCall(ctx)
  6519  	if err != nil {
  6520  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  6521  	}
  6522  	respParam := []*testpb.ResponseParameters{
  6523  		{
  6524  			Size: 31415,
  6525  		},
  6526  	}
  6527  	payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415))
  6528  	if err != nil {
  6529  		t.Fatal(err)
  6530  	}
  6531  	sreq := &testpb.StreamingOutputCallRequest{
  6532  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  6533  		ResponseParameters: respParam,
  6534  		Payload:            payload,
  6535  	}
  6536  	if err := stream.Send(sreq); err != nil {
  6537  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  6538  	}
  6539  	if _, err := stream.Recv(); err != nil {
  6540  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  6541  	}
  6542  }
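
        // The harness's clientUseCompression flag corresponds to the UseCompressor
        // call option; the "gzip" compressor name is registered by the blank import
        // of grpc/encoding/gzip at the top of this file. A per-call sketch:
        //
        //	_, err := tc.UnaryCall(ctx, req, grpc.UseCompressor("gzip"))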
  6543  
  6544  func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
  6545  	ss := &stubserver.StubServer{
  6546  		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
  6547  			return &testpb.Empty{}, nil
  6548  		},
  6549  	}
  6550  
  6551  	s := grpc.NewServer()
  6552  	defer s.Stop()
  6553  	testpb.RegisterTestServiceServer(s, ss)
  6554  
  6555  	lis, err := net.Listen("tcp", "localhost:0")
  6556  	if err != nil {
  6557  		t.Fatalf("Failed to create listener: %v", err)
  6558  	}
  6559  
  6560  	done := make(chan struct{})
  6561  	go func() {
  6562  		s.Serve(lis)
  6563  		close(done)
  6564  	}()
  6565  
  6566  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
  6567  	if err != nil {
  6568  		t.Fatalf("Failed to dial server: %v", err)
  6569  	}
  6570  	defer cc.Close()
  6571  	c := testpb.NewTestServiceClient(cc)
  6572  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6573  	defer cancel()
  6574  	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  6575  		t.Fatalf("Failed to send test RPC to server: %v", err)
  6576  	}
  6577  
  6578  	if err := lis.Close(); err != nil {
  6579  		t.Fatalf("Failed to close listener: %v", err)
  6580  	}
  6581  	const timeout = 5 * time.Second
  6582  	timer := time.NewTimer(timeout)
  6583  	select {
  6584  	case <-done:
  6585  		return
  6586  	case <-timer.C:
  6587  		t.Fatalf("Serve did not return after %v", timeout)
  6588  	}
  6589  }
  6590  
  6591  // The service handler returns a status with an invalid UTF-8 message.
  6592  func (s) TestStatusInvalidUTF8Message(t *testing.T) {
  6593  	var (
  6594  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  6595  		wantMsg = "���"
  6596  	)
  6597  
  6598  	ss := &stubserver.StubServer{
  6599  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  6600  			return nil, status.Error(codes.Internal, origMsg)
  6601  		},
  6602  	}
  6603  	if err := ss.Start(nil); err != nil {
  6604  		t.Fatalf("Error starting endpoint server: %v", err)
  6605  	}
  6606  	defer ss.Stop()
  6607  
  6608  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  6609  	defer cancel()
  6610  
  6611  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
  6612  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg)
  6613  	}
  6614  }
  6615  
  6616  // The service handler returns a status with details and an invalid UTF-8
  6617  // message. Proto fails to marshal the status because of the invalid UTF-8
  6618  // message, so the details are dropped when sending.
  6619  func (s) TestStatusInvalidUTF8Details(t *testing.T) {
  6620  	grpctest.TLogger.ExpectError("transport: failed to marshal rpc status")
  6621  
  6622  	var (
  6623  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  6624  		wantMsg = "���"
  6625  	)
  6626  
  6627  	ss := &stubserver.StubServer{
  6628  		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
  6629  			st := status.New(codes.Internal, origMsg)
  6630  			st, err := st.WithDetails(&testpb.Empty{})
  6631  			if err != nil {
  6632  				return nil, err
  6633  			}
  6634  			return nil, st.Err()
  6635  		},
  6636  	}
  6637  	if err := ss.Start(nil); err != nil {
  6638  		t.Fatalf("Error starting endpoint server: %v", err)
  6639  	}
  6640  	defer ss.Stop()
  6641  
  6642  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
  6643  	defer cancel()
  6644  
  6645  	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
  6646  	st := status.Convert(err)
  6647  	if st.Message() != wantMsg {
  6648  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg)
  6649  	}
  6650  	if len(st.Details()) != 0 {
  6651  		// Details should be dropped on the server side.
  6652  		t.Fatalf("RPC status contain details: %v, want no details", st.Details())
  6653  	}
  6654  }
  6655  
  6656  func (s) TestClientDoesntDeadlockWhileWritingErroneousLargeMessages(t *testing.T) {
  6657  	for _, e := range listTestEnv() {
  6658  		if e.httpHandler {
  6659  			continue
  6660  		}
  6661  		testClientDoesntDeadlockWhileWritingErroneousLargeMessages(t, e)
  6662  	}
  6663  }
  6664  
  6665  func testClientDoesntDeadlockWhileWritingErroneousLargeMessages(t *testing.T, e env) {
  6666  	te := newTest(t, e)
  6667  	te.userAgent = testAppUA
  6668  	smallSize := 1024
  6669  	te.maxServerReceiveMsgSize = &smallSize
  6670  	te.startServer(&testServer{security: e.security})
  6671  	defer te.tearDown()
  6672  	tc := testpb.NewTestServiceClient(te.clientConn())
  6673  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576)
  6674  	if err != nil {
  6675  		t.Fatal(err)
  6676  	}
  6677  	req := &testpb.SimpleRequest{
  6678  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6679  		Payload:      payload,
  6680  	}
  6681  	var wg sync.WaitGroup
  6682  	for i := 0; i < 10; i++ {
  6683  		wg.Add(1)
  6684  		go func() {
  6685  			defer wg.Done()
  6686  			for j := 0; j < 100; j++ {
  6687  				ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
  6688  				defer cancel()
  6689  				if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.ResourceExhausted {
  6690  					t.Errorf("TestService/UnaryCall(_, _) = _, %v, want code: %s", err, codes.ResourceExhausted)
  6691  					return
  6692  				}
  6693  			}
  6694  		}()
  6695  	}
  6696  	wg.Wait()
  6697  }
  6698  
  6699  func (s) TestRPCTimeout(t *testing.T) {
  6700  	for _, e := range listTestEnv() {
  6701  		testRPCTimeout(t, e)
  6702  	}
  6703  }
  6704  
  6705  func testRPCTimeout(t *testing.T, e env) {
  6706  	te := newTest(t, e)
  6707  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond})
  6708  	defer te.tearDown()
  6709  
  6710  	cc := te.clientConn()
  6711  	tc := testpb.NewTestServiceClient(cc)
  6712  
  6713  	const argSize = 2718
  6714  	const respSize = 314
  6715  
  6716  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  6717  	if err != nil {
  6718  		t.Fatal(err)
  6719  	}
  6720  
  6721  	req := &testpb.SimpleRequest{
  6722  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  6723  		ResponseSize: respSize,
  6724  		Payload:      payload,
  6725  	}
  6726  	for i := -1; i <= 10; i++ {
  6727  		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
  6728  		if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.DeadlineExceeded {
  6729  			t.Fatalf("TestService/UnaryCall(_, _) = _, %v; want _, error code: %s", err, codes.DeadlineExceeded)
  6730  		}
  6731  		cancel()
  6732  	}
  6733  }
  6734  
  6735  func (s) TestDisabledIOBuffers(t *testing.T) {
  6736  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
  6737  	if err != nil {
  6738  		t.Fatalf("Failed to create payload: %v", err)
  6739  	}
  6740  	req := &testpb.StreamingOutputCallRequest{
  6741  		Payload: payload,
  6742  	}
  6743  	resp := &testpb.StreamingOutputCallResponse{
  6744  		Payload: payload,
  6745  	}
  6746  
  6747  	ss := &stubserver.StubServer{
  6748  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  6749  			for {
  6750  				in, err := stream.Recv()
  6751  				if err == io.EOF {
  6752  					return nil
  6753  				}
  6754  				if err != nil {
  6755  					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
  6756  					return err
  6757  				}
  6758  				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
  6759  					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
  6760  					return err
  6761  				}
  6762  				if err := stream.Send(resp); err != nil {
  6763  					t.Errorf("stream.Send(_) = %v, want <nil>", err)
  6764  					return err
  6765  				}
  6767  			}
  6768  		},
  6769  	}
  6770  
  6771  	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
  6772  	testpb.RegisterTestServiceServer(s, ss)
  6773  
  6774  	lis, err := net.Listen("tcp", "localhost:0")
  6775  	if err != nil {
  6776  		t.Fatalf("Failed to create listener: %v", err)
  6777  	}
  6778  
  6779  	go func() {
  6780  		s.Serve(lis)
  6781  	}()
  6782  	defer s.Stop()
  6783  	dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second)
  6784  	defer dcancel()
  6785  	cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
  6786  	if err != nil {
  6787  		t.Fatalf("Failed to dial server: %v", err)
  6788  	}
  6789  	defer cc.Close()
  6790  	c := testpb.NewTestServiceClient(cc)
  6791  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6792  	defer cancel()
  6793  	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
  6794  	if err != nil {
  6795  		t.Fatalf("Failed to create stream: %v", err)
  6796  	}
  6797  	for i := 0; i < 10; i++ {
  6798  		if err := stream.Send(req); err != nil {
  6799  			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
  6800  		}
  6801  		in, err := stream.Recv()
  6802  		if err != nil {
  6803  			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
  6804  		}
  6805  		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
  6806  			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
  6807  		}
  6808  	}
  6809  	stream.CloseSend()
  6810  	if _, err := stream.Recv(); err != io.EOF {
  6811  		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
  6812  	}
  6813  }
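
        // WriteBufferSize(0) and ReadBufferSize(0) (with their WithWriteBufferSize and
        // WithReadBufferSize dial-option counterparts) disable the transport's
        // batching buffers, so each write goes to the underlying connection directly;
        // the loop above checks that message integrity does not depend on buffering.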
  6814  
  6815  func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
  6816  	for _, e := range listTestEnv() {
  6817  		if e.httpHandler {
  6818  			continue
  6819  		}
  6820  		testServerMaxHeaderListSizeClientUserViolation(t, e)
  6821  	}
  6822  }
  6823  
  6824  func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
  6825  	te := newTest(t, e)
  6826  	te.maxServerHeaderListSize = new(uint32)
  6827  	*te.maxServerHeaderListSize = 216
  6828  	te.startServer(&testServer{security: e.security})
  6829  	defer te.tearDown()
  6830  
  6831  	cc := te.clientConn()
  6832  	tc := testpb.NewTestServiceClient(cc)
  6833  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6834  	defer cancel()
  6835  	ctx = metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
  6836  	var err error
  6837  	if err = verifyResultWithDelay(func() (bool, error) {
  6838  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  6839  			return true, nil
  6840  		}
  6841  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  6842  	}); err != nil {
  6843  		t.Fatal(err)
  6844  	}
  6845  }
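
        // The harness fields maxServerHeaderListSize and maxClientHeaderListSize map
        // onto the public options. A sketch of setting the same limits directly:
        //
        //	s := grpc.NewServer(grpc.MaxHeaderListSize(216))
        //	cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithMaxHeaderListSize(216))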
  6846  
  6847  func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
  6848  	for _, e := range listTestEnv() {
  6849  		if e.httpHandler {
  6850  			continue
  6851  		}
  6852  		testClientMaxHeaderListSizeServerUserViolation(t, e)
  6853  	}
  6854  }
  6855  
  6856  func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
  6857  	te := newTest(t, e)
  6858  	te.maxClientHeaderListSize = new(uint32)
  6859  	*te.maxClientHeaderListSize = 1 // any header server sends will violate
  6860  	te.startServer(&testServer{security: e.security})
  6861  	defer te.tearDown()
  6862  
  6863  	cc := te.clientConn()
  6864  	tc := testpb.NewTestServiceClient(cc)
  6865  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6866  	defer cancel()
  6867  	var err error
  6868  	if err = verifyResultWithDelay(func() (bool, error) {
  6869  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  6870  			return true, nil
  6871  		}
  6872  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  6873  	}); err != nil {
  6874  		t.Fatal(err)
  6875  	}
  6876  }
  6877  
  6878  func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
  6879  	for _, e := range listTestEnv() {
  6880  		if e.httpHandler || e.security == "tls" {
  6881  			continue
  6882  		}
  6883  		testServerMaxHeaderListSizeClientIntentionalViolation(t, e)
  6884  	}
  6885  }
  6886  
  6887  func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
  6888  	te := newTest(t, e)
  6889  	te.maxServerHeaderListSize = new(uint32)
  6890  	*te.maxServerHeaderListSize = 512
  6891  	te.startServer(&testServer{security: e.security})
  6892  	defer te.tearDown()
  6893  
  6894  	cc, dw := te.clientConnWithConnControl()
  6895  	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
  6896  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6897  	defer cancel()
  6898  	stream, err := tc.FullDuplexCall(ctx)
  6899  	if err != nil {
  6900  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
  6901  	}
  6902  	rcw := dw.getRawConnWrapper()
  6903  	val := make([]string, 512)
  6904  	for i := range val {
  6905  		val[i] = "a"
  6906  	}
  6907  	// Allow the client time to send the initial headers.
  6908  	time.Sleep(100 * time.Millisecond)
  6909  	rcw.writeHeaders(http2.HeadersFrameParam{
  6910  		StreamID:      tc.getCurrentStreamID(),
  6911  		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
  6912  		EndStream:     false,
  6913  		EndHeaders:    true,
  6914  	})
  6915  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
  6916  		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
  6917  	}
  6918  }
  6919  
  6920  func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
  6921  	for _, e := range listTestEnv() {
  6922  		if e.httpHandler || e.security == "tls" {
  6923  			continue
  6924  		}
  6925  		testClientMaxHeaderListSizeServerIntentionalViolation(t, e)
  6926  	}
  6927  }
  6928  
  6929  func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
  6930  	te := newTest(t, e)
  6931  	te.maxClientHeaderListSize = new(uint32)
  6932  	*te.maxClientHeaderListSize = 200
  6933  	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
  6934  	defer te.tearDown()
  6935  	cc, _ := te.clientConnWithConnControl()
  6936  	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
  6937  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6938  	defer cancel()
  6939  	stream, err := tc.FullDuplexCall(ctx)
  6940  	if err != nil {
  6941  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
  6942  	}
  6943  	var i int
  6944  	var rcw *rawConnWrapper
  6945  	for i = 0; i < 100; i++ {
  6946  		rcw = lw.getLastConn()
  6947  		if rcw != nil {
  6948  			break
  6949  		}
  6950  		time.Sleep(10 * time.Millisecond)
  6951  		continue
  6952  	}
  6953  	if i == 100 {
  6954  		t.Fatalf("failed to create server transport after 1s")
  6955  	}
  6956  
  6957  	val := make([]string, 200)
  6958  	for i := range val {
  6959  		val[i] = "a"
  6960  	}
  6961  	// Allow the client time to send the initial headers.
  6962  	time.Sleep(100 * time.Millisecond)
  6963  	rcw.writeHeaders(http2.HeadersFrameParam{
  6964  		StreamID:      tc.getCurrentStreamID(),
  6965  		BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")),
  6966  		EndStream:     false,
  6967  		EndHeaders:    true,
  6968  	})
  6969  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
  6970  		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
  6971  	}
  6972  }
  6973  
  6974  func (s) TestNetPipeConn(t *testing.T) {
  6975  	// This test will block indefinitely if grpc writes both client and server
  6976  	// prefaces without either reading from the Conn.
  6977  	pl := testutils.NewPipeListener()
  6978  	s := grpc.NewServer()
  6979  	defer s.Stop()
  6980  	ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6981  		return &testpb.SimpleResponse{}, nil
  6982  	}}
  6983  	testpb.RegisterTestServiceServer(s, ts)
  6984  	go s.Serve(pl)
  6985  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
  6986  	defer cancel()
  6987  	cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer()))
  6988  	if err != nil {
  6989  		t.Fatalf("Error creating client: %v", err)
  6990  	}
  6991  	defer cc.Close()
  6992  	client := testpb.NewTestServiceClient(cc)
  6993  	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6994  		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
  6995  	}
  6996  }
  6997  
  6998  func (s) TestLargeTimeout(t *testing.T) {
  6999  	for _, e := range listTestEnv() {
  7000  		testLargeTimeout(t, e)
  7001  	}
  7002  }
  7003  
  7004  func testLargeTimeout(t *testing.T, e env) {
  7005  	te := newTest(t, e)
  7006  	te.declareLogNoise("Server.processUnaryRPC failed to write status")
  7007  
  7008  	ts := &funcServer{}
  7009  	te.startServer(ts)
  7010  	defer te.tearDown()
  7011  	tc := testpb.NewTestServiceClient(te.clientConn())
  7012  
  7013  	timeouts := []time.Duration{
  7014  		time.Duration(math.MaxInt64), // will be (correctly) converted to
  7015  		// 2562048 hours, which overflows upon converting back to an int64
  7016  		2562047 * time.Hour, // the largest timeout that does not overflow
  7017  	}
  7018  
  7019  	for i, maxTimeout := range timeouts {
  7020  		ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  7021  			deadline, ok := ctx.Deadline()
  7022  			timeout := time.Until(deadline)
  7023  			minTimeout := maxTimeout - 5*time.Second
  7024  			if !ok || timeout < minTimeout || timeout > maxTimeout {
  7025  				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
  7026  				return nil, status.Error(codes.OutOfRange, "deadline error")
  7027  			}
  7028  			return &testpb.SimpleResponse{}, nil
  7029  		}
  7030  
  7031  		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
  7032  		defer cancel()
  7033  
  7034  		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7035  			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
  7036  		}
  7037  	}
  7038  }
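
        // Why 2562047 hours is the boundary: math.MaxInt64 nanoseconds is roughly
        // 2562047.79 hours, so the wire encoding rounds up to 2562048 hours, which no
        // longer fits in an int64 of nanoseconds when converted back. A sketch of the
        // arithmetic:
        //
        //	hours := time.Duration(math.MaxInt64) / time.Hour // == 2562047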
  7039  
  7040  // Proxies typically send GO_AWAY followed by connection closure a minute or so later. This
  7041  // test ensures that the connection is re-created after GO_AWAY and not affected by the
  7042  // subsequent (old) connection closure.
  7043  func (s) TestGoAwayThenClose(t *testing.T) {
  7044  	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
  7045  	defer cancel()
  7046  
  7047  	lis1, err := net.Listen("tcp", "localhost:0")
  7048  	if err != nil {
  7049  		t.Fatalf("Error while listening. Err: %v", err)
  7050  	}
  7051  	s1 := grpc.NewServer()
  7052  	defer s1.Stop()
  7053  	ts := &funcServer{
  7054  		unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  7055  			return &testpb.SimpleResponse{}, nil
  7056  		},
  7057  		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
  7058  			if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil {
  7059  				t.Errorf("unexpected error from send: %v", err)
  7060  				return err
  7061  			}
  7062  			// Wait forever.
  7063  			_, err := stream.Recv()
  7064  			if err == nil {
  7065  				t.Error("expected to never receive any message")
  7066  			}
  7067  			return err
  7068  		},
  7069  	}
  7070  	testpb.RegisterTestServiceServer(s1, ts)
  7071  	go s1.Serve(lis1)
  7072  
  7073  	conn2Established := grpcsync.NewEvent()
  7074  	lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established)
  7075  	if err != nil {
  7076  		t.Fatalf("Error while listening. Err: %v", err)
  7077  	}
  7078  	s2 := grpc.NewServer()
  7079  	defer s2.Stop()
  7080  	testpb.RegisterTestServiceServer(s2, ts)
  7081  
  7082  	r := manual.NewBuilderWithScheme("whatever")
  7083  	r.InitialState(resolver.State{Addresses: []resolver.Address{
  7084  		{Addr: lis1.Addr().String()},
  7085  		{Addr: lis2.Addr().String()},
  7086  	}})
  7087  	cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure())
  7088  	if err != nil {
  7089  		t.Fatalf("Error creating client: %v", err)
  7090  	}
  7091  	defer cc.Close()
  7092  
  7093  	client := testpb.NewTestServiceClient(cc)
  7094  
  7095  	// We make a streaming RPC and do a one-message round trip to make sure
  7096  	// it's created on connection 1.
  7097  	//
  7098  	// We use a long-lived RPC because it will cause GracefulStop to send
  7099  	// GO_AWAY, but the connection doesn't get closed until the server stops and
  7100  	// the client receives the error.
  7101  	stream, err := client.FullDuplexCall(ctx)
  7102  	if err != nil {
  7103  		t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err)
  7104  	}
  7105  	if _, err = stream.Recv(); err != nil {
  7106  		t.Fatalf("unexpected error from first recv: %v", err)
  7107  	}
  7108  
  7109  	go s2.Serve(lis2)
  7110  
  7111  	// Send GO_AWAY to connection 1.
  7112  	go s1.GracefulStop()
  7113  
  7114  	// Wait for the ClientConn to enter IDLE state.
  7115  	state := cc.GetState()
  7116  	for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
  7117  	}
  7118  	if state != connectivity.Idle {
  7119  		t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state)
  7120  	}
  7121  
  7122  	// Initiate another RPC to create another connection.
  7123  	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7124  		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
  7125  	}
  7126  
  7127  	// Assert that connection 2 has been established.
  7128  	<-conn2Established.Done()
  7129  
  7130  	// Close the listener for server2 to prevent it from allowing new connections.
  7131  	lis2.Close()
  7132  
  7133  	// Close connection 1.
  7134  	s1.Stop()
  7135  
  7136  	// Wait for client to close.
  7137  	if _, err = stream.Recv(); err == nil {
  7138  		t.Fatal("expected the stream to die, but got a successful Recv")
  7139  	}
  7140  
  7141  	// Do a bunch of RPCs, make sure it stays stable. These should go to connection 2.
  7142  	for i := 0; i < 10; i++ {
  7143  		if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7144  			t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
  7145  		}
  7146  	}
  7147  }
  7148  
  7149  func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
  7150  	lis, err := net.Listen(network, address)
  7151  	if err != nil {
  7152  		return nil, err
  7153  	}
  7154  	return notifyingListener{connEstablished: event, Listener: lis}, nil
  7155  }
  7156  
  7157  type notifyingListener struct {
  7158  	connEstablished *grpcsync.Event
  7159  	net.Listener
  7160  }
  7161  
  7162  func (lis notifyingListener) Accept() (net.Conn, error) {
  7163  	defer lis.connEstablished.Fire()
  7164  	return lis.Listener.Accept()
  7165  }
  7166  
  7167  func (s) TestRPCWaitsForResolver(t *testing.T) {
  7168  	te := testServiceConfigSetup(t, tcpClearRREnv)
  7169  	te.startServer(&testServer{security: tcpClearRREnv.security})
  7170  	defer te.tearDown()
  7171  	r := manual.NewBuilderWithScheme("whatever")
  7172  
  7173  	te.resolverScheme = r.Scheme()
  7174  	cc := te.clientConn(grpc.WithResolvers(r))
  7175  	tc := testpb.NewTestServiceClient(cc)
  7176  
  7177  	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
  7178  	defer cancel()
  7179  	// With no resolved addresses yet, this will timeout.
  7180  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  7181  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  7182  	}
  7183  
  7184  	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
  7185  	defer cancel()
  7186  	go func() {
  7187  		time.Sleep(time.Second)
  7188  		r.UpdateState(resolver.State{
  7189  			Addresses: []resolver.Address{{Addr: te.srvAddr}},
  7190  			ServiceConfig: parseCfg(r, `{
  7191  		    "methodConfig": [
  7192  		        {
  7193  		            "name": [
  7194  		                {
  7195  		                    "service": "grpc.testing.TestService",
  7196  		                    "method": "UnaryCall"
  7197  		                }
  7198  		            ],
  7199  		            "maxRequestMessageBytes": 0
  7200  		        }
  7201  		    ]
  7202  		}`)})
  7203  	}()
  7204  	// The goroutine above waits a second before providing the service config
  7205  	// and resolved addresses, so this RPC blocks until then and then honors
  7206  	// the maxRequestMessageBytes the config contains.
  7207  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted {
  7208  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.ResourceExhausted)
  7209  	}
  7210  	if got := ctx.Err(); got != nil {
  7211  		t.Fatalf("ctx.Err() = %v; want nil (the RPC should fail fast on message size, not time out)", got)
  7212  	}
  7213  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  7214  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
  7215  	}
  7216  }
  7217  
  7218  func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) {
  7219  	// Non-gRPC content-type fallback path.
  7220  	for httpCode, wantCode := range transport.HTTPStatusConvTab {
  7221  		doHTTPHeaderTest(t, wantCode, []string{
  7222  			":status", fmt.Sprintf("%d", httpCode),
  7223  			"content-type", "text/html", // non-gRPC content type to switch to HTTP mode.
  7224  			"grpc-status", "1", // Make up a gRPC status error
  7225  			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
  7226  		})
  7227  	}
  7228  
  7229  	// Missing content-type fallback path.
  7230  	for httpCode, wantCode := range transport.HTTPStatusConvTab {
  7231  		doHTTPHeaderTest(t, wantCode, []string{
  7232  			":status", fmt.Sprintf("%d", httpCode),
  7233  			// Omitting content type to switch to HTTP mode.
  7234  			"grpc-status", "1", // Make up a gRPC status error
  7235  			"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
  7236  		})
  7237  	}
  7238  
  7239  	// Malformed HTTP status in the fallback path.
  7240  	doHTTPHeaderTest(t, codes.Internal, []string{
  7241  		":status", "abc",
  7242  		// Omitting content type to switch to HTTP mode.
  7243  		"grpc-status", "1", // Make up a gRPC status error
  7244  		"grpc-status-details-bin", "???", // Make up a gRPC field parsing error
  7245  	})
  7246  }
  7247  
  7248  // Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame).
  7249  func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) {
  7250  	for _, test := range []struct {
  7251  		header  []string
  7252  		errCode codes.Code
  7253  	}{
  7254  		{
  7255  			// missing gRPC status.
  7256  			header: []string{
  7257  				":status", "403",
  7258  				"content-type", "application/grpc",
  7259  			},
  7260  			errCode: codes.PermissionDenied,
  7261  		},
  7262  		{
  7263  			// malformed grpc-status.
  7264  			header: []string{
  7265  				":status", "502",
  7266  				"content-type", "application/grpc",
  7267  				"grpc-status", "abc",
  7268  			},
  7269  			errCode: codes.Internal,
  7270  		},
  7271  		{
  7272  			// Malformed grpc-tags-bin field.
  7273  			header: []string{
  7274  				":status", "502",
  7275  				"content-type", "application/grpc",
  7276  				"grpc-status", "0",
  7277  				"grpc-tags-bin", "???",
  7278  			},
  7279  			errCode: codes.Unavailable,
  7280  		},
  7281  		{
  7282  			// gRPC status error.
  7283  			header: []string{
  7284  				":status", "502",
  7285  				"content-type", "application/grpc",
  7286  				"grpc-status", "3",
  7287  			},
  7288  			errCode: codes.Unavailable,
  7289  		},
  7290  	} {
  7291  		doHTTPHeaderTest(t, test.errCode, test.header)
  7292  	}
  7293  }
  7294  
  7295  // Testing non-Trailers-only Trailers (delivered in second HEADERS frame)
  7296  func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) {
  7297  	for _, test := range []struct {
  7298  		responseHeader []string
  7299  		trailer        []string
  7300  		errCode        codes.Code
  7301  	}{
  7302  		{
  7303  			responseHeader: []string{
  7304  				":status", "200",
  7305  				"content-type", "application/grpc",
  7306  			},
  7307  			trailer: []string{
  7308  				// trailer missing grpc-status
  7309  				":status", "502",
  7310  			},
  7311  			errCode: codes.Unavailable,
  7312  		},
  7313  		{
  7314  			responseHeader: []string{
  7315  				":status", "404",
  7316  				"content-type", "application/grpc",
  7317  			},
  7318  			trailer: []string{
  7319  				// malformed grpc-status-details-bin field
  7320  				"grpc-status", "0",
  7321  				"grpc-status-details-bin", "????",
  7322  			},
  7323  			errCode: codes.Unimplemented,
  7324  		},
  7325  		{
  7326  			responseHeader: []string{
  7327  				":status", "200",
  7328  				"content-type", "application/grpc",
  7329  			},
  7330  			trailer: []string{
  7331  				// malformed grpc-status-details-bin field
  7332  				"grpc-status", "0",
  7333  				"grpc-status-details-bin", "????",
  7334  			},
  7335  			errCode: codes.Internal,
  7336  		},
  7337  	} {
  7338  		doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer)
  7339  	}
  7340  }
  7341  
  7342  func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) {
  7343  	header := []string{
  7344  		":status", "200",
  7345  		"content-type", "application/grpc",
  7346  	}
  7347  	doHTTPHeaderTest(t, codes.Internal, header, header, header)
  7348  }
  7349  
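        // httpServerResponse is a canned HTTP/2 response: header frames, an
        // optional DATA frame payload, and trailer frames.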
  7350  type httpServerResponse struct {
  7351  	headers  [][]string
  7352  	payload  []byte
  7353  	trailers [][]string
  7354  }
  7355  
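        // httpServer is a minimal hand-rolled HTTP/2 server that serves its
        // configured responses in order, cycling back to the first response
        // once they are exhausted.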
  7356  type httpServer struct {
  7357  	// If waitForEndStream is set, wait for the client to send a frame with end
  7358  	// stream in it before sending a response/refused stream.
  7359  	waitForEndStream bool
  7360  	refuseStream     func(uint32) bool
  7361  	responses        []httpServerResponse
  7362  }
  7363  
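        // writeHeader HPACK-encodes the given alternating keys and values and
        // writes them as a single HEADERS frame on stream sid.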
  7364  func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error {
  7365  	if len(headerFields)%2 == 1 {
  7366  		panic("odd number of kv args")
  7367  	}
  7368  
  7369  	var buf bytes.Buffer
  7370  	henc := hpack.NewEncoder(&buf)
  7371  	for len(headerFields) > 0 {
  7372  		k, v := headerFields[0], headerFields[1]
  7373  		headerFields = headerFields[2:]
  7374  		henc.WriteField(hpack.HeaderField{Name: k, Value: v})
  7375  	}
  7376  
  7377  	return framer.WriteHeaders(http2.HeadersFrameParam{
  7378  		StreamID:      sid,
  7379  		BlockFragment: buf.Bytes(),
  7380  		EndStream:     endStream,
  7381  		EndHeaders:    true,
  7382  	})
  7383  }
  7384  
  7385  func (s *httpServer) writePayload(framer *http2.Framer, sid uint32, payload []byte) error {
  7386  	return framer.WriteData(sid, false, payload)
  7387  }
  7388  
  7389  func (s *httpServer) start(t *testing.T, lis net.Listener) {
  7390  	// Launch a raw HTTP/2 server goroutine to send back the configured responses.
  7391  	go func() {
  7392  		conn, err := lis.Accept()
  7393  		if err != nil {
  7394  			t.Errorf("Error accepting connection: %v", err)
  7395  			return
  7396  		}
  7397  		defer conn.Close()
  7398  		// Read preface sent by client.
  7399  		if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil {
  7400  			t.Errorf("Error at server-side while reading preface from client. Err: %v", err)
  7401  			return
  7402  		}
  7403  		reader := bufio.NewReader(conn)
  7404  		writer := bufio.NewWriter(conn)
  7405  		framer := http2.NewFramer(writer, reader)
  7406  		if err = framer.WriteSettingsAck(); err != nil {
  7407  			t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)
  7408  			return
  7409  		}
  7410  		writer.Flush() // necessary since the client expects the server preface before declaring the connection fully set up.
  7411  
  7412  		var sid uint32
  7413  		// Loop until conn is closed and framer returns io.EOF
  7414  		for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) {
  7415  			// Read frames until a header is received.
  7416  			for {
  7417  				frame, err := framer.ReadFrame()
  7418  				if err != nil {
  7419  					if err != io.EOF {
  7420  						t.Errorf("Error at server-side while reading frame. Err: %v", err)
  7421  					}
  7422  					return
  7423  				}
  7424  				sid = 0
  7425  				switch fr := frame.(type) {
  7426  				case *http2.HeadersFrame:
  7427  					// Respond after this if we are not waiting for an end
  7428  					// stream or if this frame ends it.
  7429  					if !s.waitForEndStream || fr.StreamEnded() {
  7430  						sid = fr.Header().StreamID
  7431  					}
  7432  
  7433  				case *http2.DataFrame:
  7434  					// Respond after this if we were waiting for an end stream
  7435  					// and this frame ends it.  (If we were not waiting for an
  7436  					// end stream, this stream was already responded to when
  7437  					// the headers were received.)
  7438  					if s.waitForEndStream && fr.StreamEnded() {
  7439  						sid = fr.Header().StreamID
  7440  					}
  7441  				}
  7442  				if sid != 0 {
  7443  					if s.refuseStream == nil || !s.refuseStream(sid) {
  7444  						break
  7445  					}
  7446  					framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream)
  7447  					writer.Flush()
  7448  				}
  7449  			}
  7450  
  7451  			response := s.responses[requestNum]
  7452  			for _, header := range response.headers {
  7453  				if err = s.writeHeader(framer, sid, header, false); err != nil {
  7454  					t.Errorf("Error at server-side while writing headers. Err: %v", err)
  7455  					return
  7456  				}
  7457  				writer.Flush()
  7458  			}
  7459  			if response.payload != nil {
  7460  				if err = s.writePayload(framer, sid, response.payload); err != nil {
  7461  					t.Errorf("Error at server-side while writing payload. Err: %v", err)
  7462  					return
  7463  				}
  7464  				writer.Flush()
  7465  			}
  7466  			for i, trailer := range response.trailers {
  7467  				if err = s.writeHeader(framer, sid, trailer, i == len(response.trailers)-1); err != nil {
  7468  					t.Errorf("Error at server-side while writing trailers. Err: %v", err)
  7469  					return
  7470  				}
  7471  				writer.Flush()
  7472  			}
  7473  		}
  7474  	}()
  7475  }
  7476  
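        // doHTTPHeaderTest starts an httpServer that answers a FullDuplexCall
        // with the given header frames (sent as trailers) and asserts that the
        // client observes an error with code errCode.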
  7477  func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) {
  7478  	t.Helper()
  7479  	lis, err := net.Listen("tcp", "localhost:0")
  7480  	if err != nil {
  7481  		t.Fatalf("Failed to listen. Err: %v", err)
  7482  	}
  7483  	defer lis.Close()
  7484  	server := &httpServer{
  7485  		responses: []httpServerResponse{{trailers: headerFields}},
  7486  	}
  7487  	server.start(t, lis)
  7488  	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
  7489  	if err != nil {
  7490  		t.Fatalf("failed to dial due to err: %v", err)
  7491  	}
  7492  	defer cc.Close()
  7493  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  7494  	defer cancel()
  7495  	client := testpb.NewTestServiceClient(cc)
  7496  	stream, err := client.FullDuplexCall(ctx)
  7497  	if err != nil {
  7498  		t.Fatalf("error creating stream due to err: %v", err)
  7499  	}
  7500  	if _, err := stream.Recv(); err == nil || status.Code(err) != errCode {
  7501  		t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode)
  7502  	}
  7503  }
  7504  
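        // parseCfg parses the given service config JSON, panicking on error so
        // that tests fail loudly on a malformed config.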
  7505  func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult {
  7506  	g := r.CC.ParseServiceConfig(s)
  7507  	if g.Err != nil {
  7508  		panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err))
  7509  	}
  7510  	return g
  7511  }
  7512  
  7513  func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
  7514  	wg := &sync.WaitGroup{}
  7515  	called, done := make(chan struct{}), make(chan struct{})
  7516  	ss := &stubserver.StubServer{
  7517  		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
  7518  			close(called)
  7519  			<-ctx.Done()
  7520  			err := ctx.Err()
  7521  			if err != context.Canceled {
  7522  				t.Errorf("ctx.Err() = %v; want context.Canceled", err)
  7523  			}
  7524  			close(done)
  7525  			return nil, err
  7526  		},
  7527  	}
  7528  	if err := ss.Start(nil); err != nil {
  7529  		t.Fatalf("Error starting endpoint server: %v", err)
  7530  	}
  7531  	defer ss.Stop()
  7532  
  7533  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  7534  
  7535  	wg.Add(1)
  7536  	go func() {
  7537  		if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
  7538  			t.Errorf("ss.Client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err)
  7539  		}
  7540  		wg.Done()
  7541  	}()
  7542  
  7543  	select {
  7544  	case <-called:
  7545  	case <-time.After(5 * time.Second):
  7546  		t.Fatalf("timed out waiting for server to receive EmptyCall")
  7547  	}
  7548  	cancel()
  7549  	select {
  7550  	case <-done:
  7551  	case <-time.After(5 * time.Second):
  7552  		t.Fatalf("timed out waiting for cancellation to propagate to the server and close the done chan")
  7553  	}
  7554  	wg.Wait()
  7555  }
  7556  
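        // badGzipCompressor gzip-compresses messages but corrupts the CRC-32
        // checksum in the gzip trailer, so the receiving side fails with
        // gzip.ErrChecksum when decompressing.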
  7557  type badGzipCompressor struct{}
  7558  
  7559  func (badGzipCompressor) Do(w io.Writer, p []byte) error {
  7560  	buf := &bytes.Buffer{}
  7561  	gzw := gzip.NewWriter(buf)
  7562  	if _, err := gzw.Write(p); err != nil {
  7563  		return err
  7564  	}
  7565  	err := gzw.Close()
  7566  	bs := buf.Bytes()
  7567  	if len(bs) >= 6 {
  7568  		bs[len(bs)-6] ^= 1 // flip one bit of the CRC-32 checksum in the gzip trailer
  7569  	}
  7570  	w.Write(bs)
  7571  	return err
  7572  }
  7573  
  7574  func (badGzipCompressor) Type() string {
  7575  	return "gzip"
  7576  }
  7577  
  7578  func (s) TestGzipBadChecksum(t *testing.T) {
  7579  	ss := &stubserver.StubServer{
  7580  		UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  7581  			return &testpb.SimpleResponse{}, nil
  7582  		},
  7583  	}
  7584  	if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil {
  7585  		t.Fatalf("Error starting endpoint server: %v", err)
  7586  	}
  7587  	defer ss.Stop()
  7588  
  7589  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  7590  	defer cancel()
  7591  
  7592  	p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024))
  7593  	if err != nil {
  7594  		t.Fatalf("Unexpected error from newPayload: %v", err)
  7595  	}
  7596  	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil ||
  7597  		status.Code(err) != codes.Internal ||
  7598  		!strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) {
  7599  		t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum)
  7600  	}
  7601  }
  7602  
  7603  // When an RPC is canceled, it's possible that the last Recv() returns before
  7604  // the after() hooks of all call options have been executed.
  7605  func (s) TestCanceledRPCCallOptionRace(t *testing.T) {
  7606  	ss := &stubserver.StubServer{
  7607  		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
  7608  			err := stream.Send(&testpb.StreamingOutputCallResponse{})
  7609  			if err != nil {
  7610  				return err
  7611  			}
  7612  			<-stream.Context().Done()
  7613  			return nil
  7614  		},
  7615  	}
  7616  	if err := ss.Start(nil); err != nil {
  7617  		t.Fatalf("Error starting endpoint server: %v", err)
  7618  	}
  7619  	defer ss.Stop()
  7620  
  7621  	const count = 1000
  7622  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  7623  	defer cancel()
  7624  
  7625  	var wg sync.WaitGroup
  7626  	wg.Add(count)
  7627  	for i := 0; i < count; i++ {
  7628  		go func() {
  7629  			defer wg.Done()
  7630  			var p peer.Peer
  7631  			ctx, cancel := context.WithCancel(ctx)
  7632  			defer cancel()
  7633  			stream, err := ss.Client.FullDuplexCall(ctx, grpc.Peer(&p))
  7634  			if err != nil {
  7635  				t.Errorf("_.FullDuplexCall(_) = _, %v", err)
  7636  				return
  7637  			}
  7638  			if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil {
  7639  				t.Errorf("_ has error %v while sending", err)
  7640  				return
  7641  			}
  7642  			if _, err := stream.Recv(); err != nil {
  7643  				t.Errorf("%v.Recv() = %v", stream, err)
  7644  				return
  7645  			}
  7646  			cancel()
  7647  			if _, err := stream.Recv(); status.Code(err) != codes.Canceled {
  7648  			t.Errorf("%v completed with error %v, want %s", stream, err, codes.Canceled)
  7649  				return
  7650  			}
  7651  			// If Recv returns before the call options' after() hooks run,
  7652  			// p.Addr will not be set; fail the test in that case.
  7653  			if p.Addr == nil {
  7654  				t.Errorf("peer.Addr is nil, want non-nil")
  7655  				return
  7656  			}
  7657  		}()
  7658  	}
  7659  	wg.Wait()
  7660  }
  7661  
  7662  func (s) TestClientSettingsFloodCloseConn(t *testing.T) {
  7663  	// Tests that the server properly closes its transport if the client floods
  7664  	// settings frames and then closes the connection.
  7665  
  7666  	// Minimize buffer sizes to trigger the failure condition more quickly.
  7667  	s := grpc.NewServer(grpc.WriteBufferSize(20))
  7668  	l := bufconn.Listen(20)
  7669  	go s.Serve(l)
  7670  
  7671  	// Dial our server and handshake.
  7672  	conn, err := l.Dial()
  7673  	if err != nil {
  7674  		t.Fatalf("Error dialing bufconn: %v", err)
  7675  	}
  7676  
  7677  	n, err := conn.Write([]byte(http2.ClientPreface))
  7678  	if err != nil || n != len(http2.ClientPreface) {
  7679  		t.Fatalf("Error writing client preface: %v, %v", n, err)
  7680  	}
  7681  
  7682  	fr := http2.NewFramer(conn, conn)
  7683  	f, err := fr.ReadFrame()
  7684  	if err != nil {
  7685  		t.Fatalf("Error reading initial settings frame: %v", err)
  7686  	}
  7687  	if _, ok := f.(*http2.SettingsFrame); ok {
  7688  		if err := fr.WriteSettingsAck(); err != nil {
  7689  			t.Fatalf("Error writing settings ack: %v", err)
  7690  		}
  7691  	} else {
  7692  		t.Fatalf("Error reading initial settings frame: type=%T", f)
  7693  	}
  7694  
  7695  	// Confirm settings can be written, and that an ack is read.
  7696  	if err = fr.WriteSettings(); err != nil {
  7697  		t.Fatalf("Error writing settings frame: %v", err)
  7698  	}
  7699  	if f, err = fr.ReadFrame(); err != nil {
  7700  		t.Fatalf("Error reading frame: %v", err)
  7701  	}
  7702  	if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() {
  7703  		t.Fatalf("Unexpected frame: %v", f)
  7704  	}
  7705  
  7706  	// Flood settings frames until a timeout occurs, indicating the server has
  7707  	// stopped reading from the connection, then close the conn.
  7708  	for {
  7709  		conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))
  7710  		if err := fr.WriteSettings(); err != nil {
  7711  			if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() {
  7712  				t.Fatalf("Received unexpected write error: %v", err)
  7713  			}
  7714  			break
  7715  		}
  7716  	}
  7717  	conn.Close()
  7718  
  7719  	// If the server does not handle this situation correctly, it will never
  7720  	// close the transport.  This is because its loopyWriter.run() will have
  7721  	// exited, and thus not handle the goAway the draining process initiates.
  7722  	// Also, we would see a goroutine leak in this case, as the reader would be
  7723  	// blocked on the controlBuf's throttle() method indefinitely.
  7724  
  7725  	timer := time.AfterFunc(5*time.Second, func() {
  7726  		t.Errorf("Timeout waiting for GracefulStop to return")
  7727  		s.Stop()
  7728  	})
  7729  	s.GracefulStop()
  7730  	timer.Stop()
  7731  }
  7732  
  7733  // TestDeadlineSetOnConnectionOnClientCredentialHandshake tests that there is a deadline
  7734  // set on the net.Conn when a credential handshake happens in http2_client.
  7735  func (s) TestDeadlineSetOnConnectionOnClientCredentialHandshake(t *testing.T) {
  7736  	lis, err := net.Listen("tcp", "localhost:0")
  7737  	if err != nil {
  7738  		t.Fatalf("Failed to listen: %v", err)
  7739  	}
  7740  	connCh := make(chan net.Conn, 1)
  7741  	go func() {
  7742  		defer close(connCh)
  7743  		conn, err := lis.Accept()
  7744  		if err != nil {
  7745  			t.Errorf("Error accepting connection: %v", err)
  7746  			return
  7747  		}
  7748  		connCh <- conn
  7749  	}()
  7750  	defer func() {
  7751  		conn := <-connCh
  7752  		if conn != nil {
  7753  			conn.Close()
  7754  		}
  7755  	}()
  7756  	deadlineCh := testutils.NewChannel()
  7757  	cvd := &credentialsVerifyDeadline{
  7758  		deadlineCh: deadlineCh,
  7759  	}
  7760  	dOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
  7761  		conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", addr)
  7762  		if err != nil {
  7763  			return nil, err
  7764  		}
  7765  		return &infoConn{Conn: conn}, nil
  7766  	})
  7767  	cc, err := grpc.Dial(lis.Addr().String(), dOpt, grpc.WithTransportCredentials(cvd))
  7768  	if err != nil {
  7769  		t.Fatalf("Failed to dial: %v", err)
  7770  	}
  7771  	defer cc.Close()
  7772  
  7773  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  7774  	defer cancel()
  7775  	deadline, err := deadlineCh.Receive(ctx)
  7776  	if err != nil {
  7777  		t.Fatalf("Error receiving from deadlineCh: %v", err)
  7778  	}
  7779  	// Default connection timeout is 20 seconds, so if the deadline exceeds now
  7780  	// + 18 seconds it should be valid.
  7781  	if !deadline.(time.Time).After(time.Now().Add(time.Second * 18)) {
  7782  		t.Fatalf("Connection did not have deadline set.")
  7783  	}
  7784  }
  7785  
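        // infoConn wraps a net.Conn and records the most recent deadline set
        // on it.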
  7786  type infoConn struct {
  7787  	net.Conn
  7788  	deadline time.Time
  7789  }
  7790  
  7791  func (c *infoConn) SetDeadline(t time.Time) error {
  7792  	c.deadline = t
  7793  	return c.Conn.SetDeadline(t)
  7794  }
  7795  
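        // credentialsVerifyDeadline is a TransportCredentials implementation
        // whose ClientHandshake reports, via deadlineCh, the deadline observed
        // on the raw connection at handshake time.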
  7796  type credentialsVerifyDeadline struct {
  7797  	deadlineCh *testutils.Channel
  7798  }
  7799  
  7800  func (cvd *credentialsVerifyDeadline) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  7801  	return rawConn, nil, nil
  7802  }
  7803  
  7804  func (cvd *credentialsVerifyDeadline) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  7805  	cvd.deadlineCh.Send(rawConn.(*infoConn).deadline)
  7806  	return rawConn, nil, nil
  7807  }
  7808  
  7809  func (cvd *credentialsVerifyDeadline) Info() credentials.ProtocolInfo {
  7810  	return credentials.ProtocolInfo{}
  7811  }
  7812  func (cvd *credentialsVerifyDeadline) Clone() credentials.TransportCredentials {
  7813  	return cvd
  7814  }
  7815  func (cvd *credentialsVerifyDeadline) OverrideServerName(s string) error {
  7816  	return nil
  7817  }
  7818  
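        // unaryInterceptorVerifyConn fails the RPC with codes.NotFound unless
        // the accepted net.Conn is retrievable from the RPC context; otherwise
        // it returns codes.OK.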
  7819  func unaryInterceptorVerifyConn(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
  7820  	conn := transport.GetConnection(ctx)
  7821  	if conn == nil {
  7822  		return nil, status.Error(codes.NotFound, "connection was not in context")
  7823  	}
  7824  	return nil, status.Error(codes.OK, "")
  7825  }
  7826  
  7827  // TestUnaryServerInterceptorGetsConnection tests whether the accepted conn on
  7828  // the server gets to any unary interceptors on the server side.
  7829  func (s) TestUnaryServerInterceptorGetsConnection(t *testing.T) {
  7830  	ss := &stubserver.StubServer{}
  7831  	if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyConn)}); err != nil {
  7832  		t.Fatalf("Error starting endpoint server: %v", err)
  7833  	}
  7834  	defer ss.Stop()
  7835  
  7836  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  7837  	defer cancel()
  7838  
  7839  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK {
  7840  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK)
  7841  	}
  7842  }
  7843  
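        // streamingInterceptorVerifyConn is the streaming counterpart of
        // unaryInterceptorVerifyConn.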
  7844  func streamingInterceptorVerifyConn(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  7845  	conn := transport.GetConnection(ss.Context())
  7846  	if conn == nil {
  7847  		return status.Error(codes.NotFound, "connection was not in context")
  7848  	}
  7849  	return status.Error(codes.OK, "")
  7850  }
  7851  
  7852  // TestStreamingServerInterceptorGetsConnection tests whether the accepted conn on
  7853  // the server gets to any streaming interceptors on the server side.
  7854  func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) {
  7855  	ss := &stubserver.StubServer{}
  7856  	if err := ss.Start([]grpc.ServerOption{grpc.StreamInterceptor(streamingInterceptorVerifyConn)}); err != nil {
  7857  		t.Fatalf("Error starting endpoint server: %v", err)
  7858  	}
  7859  	defer ss.Stop()
  7860  
  7861  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  7862  	defer cancel()
  7863  
  7864  	s, err := ss.Client.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
  7865  	if err != nil {
  7866  		t.Fatalf("ss.Client.StreamingOutputCall(_) = _, %v, want _, <nil>", err)
  7867  	}
  7868  	if _, err := s.Recv(); err != io.EOF {
  7869  		t.Fatalf("s.Recv() = _, %v, want _, %v", err, io.EOF)
  7870  	}
  7871  }
  7872  
  7873  // unaryInterceptorVerifyAuthority verifies there is an unambiguous :authority
  7874  // by the time the request reaches an interceptor. Per gRFC A41, an unambiguous
  7875  // :authority means at most a single :authority header and no host header.
  7876  func unaryInterceptorVerifyAuthority(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
  7877  	md, ok := metadata.FromIncomingContext(ctx)
  7878  	if !ok {
  7879  		return nil, status.Error(codes.NotFound, "metadata was not in context")
  7880  	}
  7881  	authority := md.Get(":authority")
  7882  	if len(authority) > 1 { // Should be an unambiguous authority by the time it gets to the interceptor.
  7883  		return nil, status.Error(codes.NotFound, ":authority header had more than one value")
  7884  	}
  7885  	// Host header shouldn't be present by the time it gets to the interceptor
  7886  	// level (should either be renamed to :authority or explicitly deleted).
  7887  	host := md.Get("host")
  7888  	if len(host) != 0 {
  7889  		return nil, status.Error(codes.NotFound, "host header should not be present in metadata")
  7890  	}
  7891  	// Pass the authority back to the client via the status message - NotFound
  7892  	// is used so the grpc-message will be available for verification.
  7893  	if len(authority) == 0 {
  7894  		// Represent no :authority header present with an empty string.
  7895  		return nil, status.Error(codes.NotFound, "")
  7896  	}
  7897  	return nil, status.Error(codes.NotFound, authority[0])
  7898  }
  7899  
  7900  // TestAuthorityHeader tests that the eventual :authority that reaches the grpc
  7901  // layer is unambiguous due to logic added in A41.
  7902  func (s) TestAuthorityHeader(t *testing.T) {
  7903  	tests := []struct {
  7904  		name          string
  7905  		headers       []string
  7906  		wantAuthority string
  7907  	}{
  7908  		// "If :authority is missing, Host must be renamed to :authority." - A41
  7909  		{
  7910  			name: "Missing :authority",
  7911  			// Codepath triggered by incoming headers with no authority but with
  7912  			// a host.
  7913  			headers: []string{
  7914  				":method", "POST",
  7915  				":path", "/grpc.testing.TestService/UnaryCall",
  7916  				"content-type", "application/grpc",
  7917  				"te", "trailers",
  7918  				"host", "localhost",
  7919  			},
  7920  			wantAuthority: "localhost",
  7921  		},
  7922  		{
  7923  			name: "Missing :authority and host",
  7924  			// Codepath triggered by incoming headers with no :authority and no
  7925  			// host.
  7926  			headers: []string{
  7927  				":method", "POST",
  7928  				":path", "/grpc.testing.TestService/UnaryCall",
  7929  				"content-type", "application/grpc",
  7930  				"te", "trailers",
  7931  			},
  7932  			wantAuthority: "",
  7933  		},
  7934  		// "If :authority is present, Host must be discarded." - A41
  7935  		{
  7936  			name: ":authority and host present",
  7937  			// Codepath triggered by incoming headers with both an authority
  7938  			// header and a host header.
  7939  			headers: []string{
  7940  				":method", "POST",
  7941  				":path", "/grpc.testing.TestService/UnaryCall",
  7942  				":authority", "localhost",
  7943  				"content-type", "application/grpc",
  7944  				"host", "localhost2",
  7945  			},
  7946  			wantAuthority: "localhost",
  7947  		},
  7948  	}
  7949  	for _, test := range tests {
  7950  		t.Run(test.name, func(t *testing.T) {
  7951  			te := newTest(t, tcpClearRREnv)
  7952  			ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  7953  				return &testpb.SimpleResponse{}, nil
  7954  			}}
  7955  			te.unaryServerInt = unaryInterceptorVerifyAuthority
  7956  			te.startServer(ts)
  7957  			defer te.tearDown()
  7958  			success := testutils.NewChannel()
  7959  			te.withServerTester(func(st *serverTester) {
  7960  				st.writeHeaders(http2.HeadersFrameParam{
  7961  					StreamID:      1,
  7962  					BlockFragment: st.encodeHeader(test.headers...),
  7963  					EndStream:     false,
  7964  					EndHeaders:    true,
  7965  				})
  7966  				st.writeData(1, true, []byte{0, 0, 0, 0, 0})
  7967  
  7968  				for {
  7969  					frame := st.wantAnyFrame()
  7970  					f, ok := frame.(*http2.MetaHeadersFrame)
  7971  					if !ok {
  7972  						continue
  7973  					}
  7974  					for _, header := range f.Fields {
  7975  						if header.Name == "grpc-message" {
  7976  							success.Send(header.Value)
  7977  							return
  7978  						}
  7979  					}
  7980  				}
  7981  			})
  7982  
  7983  			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  7984  			defer cancel()
  7985  			gotAuthority, err := success.Receive(ctx)
  7986  			if err != nil {
  7987  				t.Fatalf("Error receiving from channel: %v", err)
  7988  			}
  7989  			if gotAuthority != test.wantAuthority {
  7990  				t.Fatalf("gotAuthority: %v, wantAuthority %v", gotAuthority, test.wantAuthority)
  7991  			}
  7992  		})
  7993  	}
  7994  }
  7995  
  7996  // wrapCloseListener tracks Accepts/Closes and maintains a counter of the
  7997  // number of open connections.
  7998  type wrapCloseListener struct {
  7999  	net.Listener
  8000  	connsOpen int32
  8001  }
  8002  
  8003  // wrapCloseConn is returned by wrapCloseListener.Accept and decrements the
  8004  // listener's connsOpen counter when Close is called.
  8005  type wrapCloseConn struct {
  8006  	net.Conn
  8007  	lis       *wrapCloseListener
  8008  	closeOnce sync.Once
  8009  }
  8010  
  8011  func (w *wrapCloseListener) Accept() (net.Conn, error) {
  8012  	conn, err := w.Listener.Accept()
  8013  	if err != nil {
  8014  		return nil, err
  8015  	}
  8016  	atomic.AddInt32(&w.connsOpen, 1)
  8017  	return &wrapCloseConn{Conn: conn, lis: w}, nil
  8018  }
  8019  
  8020  func (w *wrapCloseConn) Close() error {
  8021  	defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) })
  8022  	return w.Conn.Close()
  8023  }
  8024  
  8025  // TestServerClosesConn ensures the server closes every accepted conn, even if
  8026  // the client doesn't complete the HTTP/2 handshake.
  8027  func (s) TestServerClosesConn(t *testing.T) {
  8028  	lis := bufconn.Listen(20)
  8029  	wrapLis := &wrapCloseListener{Listener: lis}
  8030  
  8031  	s := grpc.NewServer()
  8032  	go s.Serve(wrapLis)
  8033  	defer s.Stop()
  8034  
  8035  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  8036  	defer cancel()
  8037  
  8038  	for i := 0; i < 10; i++ {
  8039  		conn, err := lis.DialContext(ctx)
  8040  		if err != nil {
  8041  			t.Fatalf("Dial = _, %v; want _, nil", err)
  8042  		}
  8043  		conn.Close()
  8044  	}
  8045  	for ctx.Err() == nil {
  8046  		if atomic.LoadInt32(&wrapLis.connsOpen) == 0 {
  8047  			return
  8048  		}
  8049  		time.Sleep(50 * time.Millisecond)
  8050  	}
  8051  	t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen))
  8052  }