google.golang.org/grpc@v1.74.2/test/end2end_test.go (about)

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package test
    20  
    21  import (
    22  	"bufio"
    23  	"bytes"
    24  	"context"
    25  	"crypto/tls"
    26  	"encoding/json"
    27  	"errors"
    28  	"flag"
    29  	"fmt"
    30  	"io"
    31  	"math"
    32  	"net"
    33  	"net/http"
    34  	"os"
    35  	"reflect"
    36  	"runtime"
    37  	"strings"
    38  	"sync"
    39  	"sync/atomic"
    40  	"syscall"
    41  	"testing"
    42  	"time"
    43  
    44  	"github.com/google/go-cmp/cmp"
    45  	"golang.org/x/net/http2"
    46  	"golang.org/x/net/http2/hpack"
    47  	"google.golang.org/grpc"
    48  	"google.golang.org/grpc/balancer"
    49  	"google.golang.org/grpc/balancer/roundrobin"
    50  	"google.golang.org/grpc/codes"
    51  	"google.golang.org/grpc/connectivity"
    52  	"google.golang.org/grpc/credentials"
    53  	"google.golang.org/grpc/credentials/insecure"
    54  	"google.golang.org/grpc/health"
    55  	"google.golang.org/grpc/internal"
    56  	"google.golang.org/grpc/internal/binarylog"
    57  	"google.golang.org/grpc/internal/channelz"
    58  	"google.golang.org/grpc/internal/grpcsync"
    59  	"google.golang.org/grpc/internal/grpctest"
    60  	"google.golang.org/grpc/internal/stubserver"
    61  	"google.golang.org/grpc/internal/testutils"
    62  	"google.golang.org/grpc/internal/transport"
    63  	"google.golang.org/grpc/metadata"
    64  	"google.golang.org/grpc/peer"
    65  	"google.golang.org/grpc/resolver"
    66  	"google.golang.org/grpc/resolver/manual"
    67  	"google.golang.org/grpc/serviceconfig"
    68  	"google.golang.org/grpc/stats"
    69  	"google.golang.org/grpc/status"
    70  	"google.golang.org/grpc/tap"
    71  	"google.golang.org/grpc/test/bufconn"
    72  	"google.golang.org/grpc/testdata"
    73  
    74  	spb "google.golang.org/genproto/googleapis/rpc/status"
    75  	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
    76  	healthpb "google.golang.org/grpc/health/grpc_health_v1"
    77  	testgrpc "google.golang.org/grpc/interop/grpc_testing"
    78  	testpb "google.golang.org/grpc/interop/grpc_testing"
    79  	"google.golang.org/protobuf/proto"
    80  	"google.golang.org/protobuf/types/known/anypb"
    81  
    82  	_ "google.golang.org/grpc/encoding/gzip"
    83  )
    84  
// defaultHealthService is the canonical gRPC health-checking service name
// registered/queried by the standard health server.
const defaultHealthService = "grpc.health.v1.Health"
    86  
func init() {
	// Enable channelz so tests can inspect channel and server state.
	channelz.TurnOn()
	// Register the custom balancer used by the RPC-blocking-picker tests.
	balancer.Register(triggerRPCBlockPickerBalancerBuilder{})
}
    91  
// s hooks this package's tests into the grpctest subtest runner.
type s struct {
	grpctest.Tester
}
    95  
// Test is the single entry point that runs every subtest defined on s.
func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}
    99  
var (
	// For headers:
	testMetadata = metadata.MD{
		"key1":     []string{"value1"},
		"key2":     []string{"value2"},
		"key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})},
	}
	testMetadata2 = metadata.MD{
		"key1": []string{"value12"},
		"key2": []string{"value22"},
	}
	// For trailers:
	testTrailerMetadata = metadata.MD{
		"tkey1":     []string{"trailerValue1"},
		"tkey2":     []string{"trailerValue2"},
		"tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})},
	}
	testTrailerMetadata2 = metadata.MD{
		"tkey1": []string{"trailerValue12"},
		"tkey2": []string{"trailerValue22"},
	}
	// capital "Key" is illegal in HTTP/2.
	malformedHTTP2Metadata = metadata.MD{
		"Key": []string{"foo"},
	}
	// testAppUA is a well-formed user-agent string; failAppUA, when used as
	// a user-agent prefix, makes the test server fail the RPC on purpose.
	testAppUA = "myApp1/1.0 myApp2/0.9"
	failAppUA = "fail-this-RPC"
	// detailedError is the rich status (with a Details entry) the test
	// server returns when it sees failAppUA, so clients can assert that
	// status details survive the round trip.
	detailedError = status.ErrorProto(&spb.Status{
		Code:    int32(codes.DataLoss),
		Message: "error for testing: " + failAppUA,
		Details: []*anypb.Any{{
			TypeUrl: "url",
			Value:   []byte{6, 0, 0, 6, 1, 3},
		}},
	})
)

var raceMode bool // set by race.go in race mode
   138  
// Note : Do not use this for further tests.
//
// testServer is the TestService implementation used by most tests in this
// file; its fields are per-test knobs that alter handler behavior.
type testServer struct {
	testgrpc.UnimplementedTestServiceServer

	security           string // indicate the authentication protocol used by this server.
	earlyFail          bool   // whether to error out the execution of a service handler prematurely.
	setAndSendHeader   bool   // whether to call setHeader and sendHeader.
	setHeaderOnly      bool   // whether to only call setHeader, not sendHeader.
	multipleSetTrailer bool   // whether to call setTrailer multiple times.
	// unaryCallSleepTime is an artificial delay inserted into UnaryCall.
	unaryCallSleepTime time.Duration
}
   150  
   151  func (s *testServer) EmptyCall(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
   152  	if md, ok := metadata.FromIncomingContext(ctx); ok {
   153  		// For testing purpose, returns an error if user-agent is failAppUA.
   154  		// To test that client gets the correct error.
   155  		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
   156  			return nil, detailedError
   157  		}
   158  		var str []string
   159  		for _, entry := range md["user-agent"] {
   160  			str = append(str, "ua", entry)
   161  		}
   162  		grpc.SendHeader(ctx, metadata.Pairs(str...))
   163  	}
   164  	return new(testpb.Empty), nil
   165  }
   166  
   167  func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) {
   168  	if size < 0 {
   169  		return nil, fmt.Errorf("requested a response with invalid length %d", size)
   170  	}
   171  	body := make([]byte, size)
   172  	switch t {
   173  	case testpb.PayloadType_COMPRESSABLE:
   174  	default:
   175  		return nil, fmt.Errorf("unsupported payload type: %d", t)
   176  	}
   177  	return &testpb.Payload{
   178  		Type: t,
   179  		Body: body,
   180  	}, nil
   181  }
   182  
// UnaryCall validates incoming metadata and peer/auth information, then
// returns a payload of the requested type and size. Depending on the
// server's knobs it exercises SetHeader/SendHeader/SetTrailer in the
// combinations the metadata tests expect.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		if _, exists := md[":authority"]; !exists {
			return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		if s.setAndSendHeader {
			// Stage headers, then flush them together with testMetadata2
			// via an explicit SendHeader.
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SendHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			// Accumulate headers with two SetHeader calls; they go out
			// implicitly with the response.
			if err := grpc.SetHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", md, err)
			}
			if err := grpc.SetHeader(ctx, testMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want <nil>", testMetadata2, err)
			}
		} else {
			// Default: echo the incoming metadata as headers immediately.
			if err := grpc.SendHeader(ctx, md); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
			}
		}
		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
		}
		if s.multipleSetTrailer {
			if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil {
				return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata2, err)
			}
		}
	}
	pr, ok := peer.FromContext(ctx)
	if !ok {
		return nil, status.Error(codes.DataLoss, "failed to get peer from ctx")
	}
	if pr.Addr == net.Addr(nil) {
		return nil, status.Error(codes.DataLoss, "failed to get peer address")
	}
	if s.security != "" {
		// Check Auth info: the transport must have performed the handshake
		// this server was configured for (only TLS is recognized here).
		var authType, serverName string
		switch info := pr.AuthInfo.(type) {
		case credentials.TLSInfo:
			authType = info.AuthType()
			serverName = info.State.ServerName
		default:
			return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type")
		}
		if authType != s.security {
			return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security)
		}
		if serverName != "x.test.example.com" {
			return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName)
		}
	}
	// Simulate some service delay.
	time.Sleep(s.unaryCallSleepTime)

	payload, err := newPayload(in.GetResponseType(), in.GetResponseSize())
	if err != nil {
		return nil, err
	}

	return &testpb.SimpleResponse{
		Payload: payload,
	}, nil
}
   253  
// StreamingOutputCall streams one response per entry in the request's
// ResponseParameters, honoring any per-message delay. Like EmptyCall, it
// fails deliberately when the caller's user-agent starts with failAppUA,
// so error propagation on server streams can be tested.
func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error {
	if md, ok := metadata.FromIncomingContext(stream.Context()); ok {
		if _, exists := md[":authority"]; !exists {
			return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md)
		}
		// For testing purpose, returns an error if user-agent is failAppUA.
		// To test that client gets the correct error.
		if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) {
			return status.Error(codes.DataLoss, "error for testing: "+failAppUA)
		}
	}
	cs := args.GetResponseParameters()
	for _, c := range cs {
		// Optional pacing between messages, specified in microseconds.
		if us := c.GetIntervalUs(); us > 0 {
			time.Sleep(time.Duration(us) * time.Microsecond)
		}

		payload, err := newPayload(args.GetResponseType(), c.GetSize())
		if err != nil {
			return err
		}

		if err := stream.Send(&testpb.StreamingOutputCallResponse{
			Payload: payload,
		}); err != nil {
			return err
		}
	}
	return nil
}
   284  
   285  func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error {
   286  	var sum int
   287  	for {
   288  		in, err := stream.Recv()
   289  		if err == io.EOF {
   290  			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
   291  				AggregatedPayloadSize: int32(sum),
   292  			})
   293  		}
   294  		if err != nil {
   295  			return err
   296  		}
   297  		p := in.GetPayload().GetBody()
   298  		sum += len(p)
   299  		if s.earlyFail {
   300  			return status.Error(codes.NotFound, "not found")
   301  		}
   302  	}
   303  }
   304  
// FullDuplexCall echoes responses for each request as it arrives, driven
// by the request's ResponseParameters. Header/trailer behavior follows
// the same knobs as UnaryCall. ResourceExhausted errors from Recv/Send
// are rewritten to Internal to support testSvrWriteStatusEarlyWrite.
func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if s.setAndSendHeader {
			// Stage headers, then flush them together with testMetadata2.
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SendHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else if s.setHeaderOnly {
			// Accumulate headers only; they are sent with the first message.
			if err := stream.SetHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, md, err)
			}
			if err := stream.SetHeader(testMetadata2); err != nil {
				return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want <nil>", stream, testMetadata2, err)
			}
		} else {
			if err := stream.SendHeader(md); err != nil {
				return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
			}
		}
		stream.SetTrailer(testTrailerMetadata)
		if s.multipleSetTrailer {
			stream.SetTrailer(testTrailerMetadata2)
		}
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			// to facilitate testSvrWriteStatusEarlyWrite
			if status.Code(err) == codes.ResourceExhausted {
				return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
			}
			return err
		}
		cs := in.GetResponseParameters()
		for _, c := range cs {
			// Optional pacing between responses, in microseconds.
			if us := c.GetIntervalUs(); us > 0 {
				time.Sleep(time.Duration(us) * time.Microsecond)
			}

			payload, err := newPayload(in.GetResponseType(), c.GetSize())
			if err != nil {
				return err
			}

			if err := stream.Send(&testpb.StreamingOutputCallResponse{
				Payload: payload,
			}); err != nil {
				// to facilitate testSvrWriteStatusEarlyWrite
				if status.Code(err) == codes.ResourceExhausted {
					return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error())
				}
				return err
			}
		}
	}
}
   368  
   369  func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallServer) error {
   370  	var msgBuf []*testpb.StreamingOutputCallRequest
   371  	for {
   372  		in, err := stream.Recv()
   373  		if err == io.EOF {
   374  			// read done.
   375  			break
   376  		}
   377  		if err != nil {
   378  			return err
   379  		}
   380  		msgBuf = append(msgBuf, in)
   381  	}
   382  	for _, m := range msgBuf {
   383  		cs := m.GetResponseParameters()
   384  		for _, c := range cs {
   385  			if us := c.GetIntervalUs(); us > 0 {
   386  				time.Sleep(time.Duration(us) * time.Microsecond)
   387  			}
   388  
   389  			payload, err := newPayload(m.GetResponseType(), c.GetSize())
   390  			if err != nil {
   391  				return err
   392  			}
   393  
   394  			if err := stream.Send(&testpb.StreamingOutputCallResponse{
   395  				Payload: payload,
   396  			}); err != nil {
   397  				return err
   398  			}
   399  		}
   400  	}
   401  	return nil
   402  }
   403  
// env describes one client/server test environment: the transport
// network, security protocol, server transport implementation, and
// load-balancing policy to exercise.
type env struct {
	name         string
	network      string // The type of network such as tcp, unix, etc.
	security     string // The security protocol such as TLS, SSH, etc.
	httpHandler  bool   // whether to use the http.Handler ServerTransport; requires TLS
	balancer     string // One of "round_robin", "pick_first", or "".
	customDialer func(string, string, time.Duration) (net.Conn, error)
}
   412  
   413  func (e env) runnable() bool {
   414  	if runtime.GOOS == "windows" && e.network == "unix" {
   415  		return false
   416  	}
   417  	return true
   418  }
   419  
   420  func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) {
   421  	if e.customDialer != nil {
   422  		return e.customDialer(e.network, addr, timeout)
   423  	}
   424  	return net.DialTimeout(e.network, addr, timeout)
   425  }
   426  
// Prebuilt environments covering the combinations of transport security,
// server transport implementation, and balancer configuration that the
// tests iterate over via listTestEnv.
var (
	tcpClearEnv   = env{name: "tcp-clear-v1-balancer", network: "tcp"}
	tcpTLSEnv     = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls"}
	tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"}
	tcpTLSRREnv   = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"}
	handlerEnv    = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"}
	noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"}
	allEnv        = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv}
)
   436  
   437  var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.")
   438  
   439  func listTestEnv() (envs []env) {
   440  	if *onlyEnv != "" {
   441  		for _, e := range allEnv {
   442  			if e.name == *onlyEnv {
   443  				if !e.runnable() {
   444  					panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS))
   445  				}
   446  				return []env{e}
   447  			}
   448  		}
   449  		panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv))
   450  	}
   451  	for _, e := range allEnv {
   452  		if e.runnable() {
   453  			envs = append(envs, e)
   454  		}
   455  	}
   456  	return envs
   457  }
   458  
// test is an end-to-end test. It should be created with the newTest
// func, modified as needed, and then started with its startServer method.
// It should be cleaned up with the tearDown method.
type test struct {
	// The following are setup in newTest().
	t      *testing.T
	e      env
	ctx    context.Context // valid for life of test, before tearDown
	cancel context.CancelFunc

	// The following knobs are for the server-side, and should be set after
	// calling newTest() and before calling startServer().

	// whether or not to expose the server's health via the default health
	// service implementation.
	enableHealthServer bool
	// In almost all cases, one should set the 'enableHealthServer' flag above to
	// expose the server's health using the default health service
	// implementation. This should only be used when a non-default health service
	// implementation is required.
	healthServer healthgrpc.HealthServer
	// The remaining server knobs are translated into grpc.ServerOptions by
	// listenAndServe; pointer fields are applied only when non-nil.
	maxStream               uint32
	tapHandle               tap.ServerInHandle
	maxServerMsgSize        *int
	maxServerReceiveMsgSize *int
	maxServerSendMsgSize    *int
	maxServerHeaderListSize *uint32
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	serverCompression           bool
	unknownHandler              grpc.StreamHandler
	unaryServerInt              grpc.UnaryServerInterceptor
	streamServerInt             grpc.StreamServerInterceptor
	serverInitialWindowSize     int32
	serverInitialConnWindowSize int32
	customServerOptions         []grpc.ServerOption

	// The following knobs are for the client-side, and should be set after
	// calling newTest() and before calling clientConn(). They are turned
	// into grpc.DialOptions by configDial.
	maxClientMsgSize        *int
	maxClientReceiveMsgSize *int
	maxClientSendMsgSize    *int
	maxClientHeaderListSize *uint32
	userAgent               string
	// Used to test the deprecated API WithCompressor and WithDecompressor.
	clientCompression bool
	// Used to test the new compressor registration API UseCompressor.
	clientUseCompression bool
	// clientNopCompression is set to create a compressor whose type is not supported.
	clientNopCompression        bool
	unaryClientInt              grpc.UnaryClientInterceptor
	streamClientInt             grpc.StreamClientInterceptor
	clientInitialWindowSize     int32
	clientInitialConnWindowSize int32
	perRPCCreds                 credentials.PerRPCCredentials
	customDialOptions           []grpc.DialOption
	resolverScheme              string

	// These are set once startServer is called. The common case is to have
	// only one testServer.
	srv     stopper
	hSrv    healthgrpc.HealthServer
	srvAddr string

	// These are set once startServers is called.
	srvs     []stopper
	hSrvs    []healthgrpc.HealthServer
	srvAddrs []string

	cc          *grpc.ClientConn // nil until requested via clientConn
	restoreLogs func()           // nil unless declareLogNoise is used
}
   530  
// stopper abstracts anything that can be shut down abruptly (Stop) or
// gracefully (GracefulStop); both *grpc.Server and wrapHS satisfy it.
type stopper interface {
	Stop()
	GracefulStop()
}
   535  
   536  func (te *test) tearDown() {
   537  	if te.cancel != nil {
   538  		te.cancel()
   539  		te.cancel = nil
   540  	}
   541  
   542  	if te.cc != nil {
   543  		te.cc.Close()
   544  		te.cc = nil
   545  	}
   546  
   547  	if te.restoreLogs != nil {
   548  		te.restoreLogs()
   549  		te.restoreLogs = nil
   550  	}
   551  
   552  	if te.srv != nil {
   553  		te.srv.Stop()
   554  	}
   555  	for _, s := range te.srvs {
   556  		s.Stop()
   557  	}
   558  }
   559  
   560  // newTest returns a new test using the provided testing.T and
   561  // environment.  It is returned with default values. Tests should
   562  // modify it before calling its startServer and clientConn methods.
   563  func newTest(t *testing.T, e env) *test {
   564  	te := &test{
   565  		t:         t,
   566  		e:         e,
   567  		maxStream: math.MaxUint32,
   568  	}
   569  	te.ctx, te.cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
   570  	return te
   571  }
   572  
// listenAndServe creates a listener for the test's environment, builds a
// grpc.Server from the test's server-side knobs, registers the
// TestService (when ts is non-nil) plus an optional health server, and
// starts serving in a goroutine. It records the server, health server,
// and dialable address on te and returns the listener. For httpHandler
// environments, the grpc.Server is instead mounted as the handler of a
// TLS *http.Server.
func (te *test) listenAndServe(ts testgrpc.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener {
	te.t.Helper()
	te.t.Logf("Running test in %s environment...", te.e.name)
	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)}
	if te.maxServerMsgSize != nil {
		sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize))
	}
	if te.maxServerReceiveMsgSize != nil {
		sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize))
	}
	if te.maxServerSendMsgSize != nil {
		sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize))
	}
	if te.maxServerHeaderListSize != nil {
		sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize))
	}
	if te.tapHandle != nil {
		sopts = append(sopts, grpc.InTapHandle(te.tapHandle))
	}
	if te.serverCompression {
		// Exercises the deprecated RPCCompressor/RPCDecompressor options.
		sopts = append(sopts,
			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.unaryServerInt != nil {
		sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt))
	}
	if te.streamServerInt != nil {
		sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt))
	}
	if te.unknownHandler != nil {
		sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler))
	}
	if te.serverInitialWindowSize > 0 {
		sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize))
	}
	if te.serverInitialConnWindowSize > 0 {
		sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize))
	}
	la := ":0"
	if te.e.network == "unix" {
		// Use a unique socket path; remove any stale socket file first.
		la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano())
		syscall.Unlink(la)
	}
	lis, err := listen(te.e.network, la)
	if err != nil {
		te.t.Fatalf("Failed to listen: %v", err)
	}
	if te.e.security == "tls" {
		creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
		if err != nil {
			te.t.Fatalf("Failed to generate credentials %v", err)
		}
		sopts = append(sopts, grpc.Creds(creds))
	}
	sopts = append(sopts, te.customServerOptions...)
	s := grpc.NewServer(sopts...)
	if ts != nil {
		testgrpc.RegisterTestServiceServer(s, ts)
	}

	// Create a new default health server if enableHealthServer is set, or use
	// the provided one.
	hs := te.healthServer
	if te.enableHealthServer {
		hs = health.NewServer()
	}
	if hs != nil {
		healthgrpc.RegisterHealthServer(s, hs)
	}

	addr := la
	switch te.e.network {
	case "unix":
	default:
		// For TCP, rewrite the wildcard listen address into a dialable
		// localhost address with the port the OS actually chose.
		_, port, err := net.SplitHostPort(lis.Addr().String())
		if err != nil {
			te.t.Fatalf("Failed to parse listener address: %v", err)
		}
		addr = "localhost:" + port
	}

	te.srv = s
	te.hSrv = hs
	te.srvAddr = addr

	if te.e.httpHandler {
		if te.e.security != "tls" {
			te.t.Fatalf("unsupported environment settings")
		}
		cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem"))
		if err != nil {
			te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err)
		}
		// Serve the grpc.Server as an http.Handler over TLS/HTTP2.
		hs := &http.Server{
			Handler:   s,
			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		}
		if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil {
			te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err)
		}
		te.srv = wrapHS{hs}
		tlsListener := tls.NewListener(lis, hs.TLSConfig)
		go hs.Serve(tlsListener)
		return lis
	}

	go s.Serve(lis)
	return lis
}
   684  
// wrapHS adapts an *http.Server (serving gRPC via the http.Handler
// transport) to the stopper interface used by tearDown.
type wrapHS struct {
	s *http.Server
}
   688  
   689  func (w wrapHS) GracefulStop() {
   690  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   691  	defer cancel()
   692  	w.s.Shutdown(ctx)
   693  }
   694  
// Stop abruptly closes the HTTP server and then stops the wrapped
// *grpc.Server handler so in-flight gRPC streams are torn down as well.
func (w wrapHS) Stop() {
	w.s.Close()
	w.s.Handler.(*grpc.Server).Stop()
}
   699  
// startServerWithConnControl starts the test server on a listener that
// wraps accepted connections, returning the wrapper so tests can inspect
// or manipulate the raw conns.
func (te *test) startServerWithConnControl(ts testgrpc.TestServiceServer) *listenerWrapper {
	l := te.listenAndServe(ts, listenWithConnControl)
	return l.(*listenerWrapper)
}
   704  
// startServer starts a gRPC server exposing the provided TestService
// implementation. Callers should defer a call to te.tearDown to clean up.
func (te *test) startServer(ts testgrpc.TestServiceServer) {
	te.t.Helper()
	te.listenAndServe(ts, net.Listen)
}
   711  
   712  // startServers starts 'num' gRPC servers exposing the provided TestService.
   713  func (te *test) startServers(ts testgrpc.TestServiceServer, num int) {
   714  	for i := 0; i < num; i++ {
   715  		te.startServer(ts)
   716  		te.srvs = append(te.srvs, te.srv.(*grpc.Server))
   717  		te.hSrvs = append(te.hSrvs, te.hSrv)
   718  		te.srvAddrs = append(te.srvAddrs, te.srvAddr)
   719  		te.srv = nil
   720  		te.hSrv = nil
   721  		te.srvAddr = ""
   722  	}
   723  }
   724  
   725  // setHealthServingStatus is a helper function to set the health status.
   726  func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
   727  	hs, ok := te.hSrv.(*health.Server)
   728  	if !ok {
   729  		panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs))
   730  	}
   731  	hs.SetServingStatus(service, status)
   732  }
   733  
// nopCompressor embeds a real gzip Compressor but reports an unsupported
// type name (see Type), for negative-path compression tests.
type nopCompressor struct {
	grpc.Compressor
}
   737  
   738  // newNopCompressor creates a compressor to test the case that type is not supported.
   739  func newNopCompressor() grpc.Compressor {
   740  	return &nopCompressor{grpc.NewGZIPCompressor()}
   741  }
   742  
// Type reports "nop", a compressor name no codec is registered under.
func (c *nopCompressor) Type() string {
	return "nop"
}
   746  
// nopDecompressor embeds a real gzip Decompressor but reports an
// unsupported type name (see Type), mirroring nopCompressor.
type nopDecompressor struct {
	grpc.Decompressor
}
   750  
   751  // newNopDecompressor creates a decompressor to test the case that type is not supported.
   752  func newNopDecompressor() grpc.Decompressor {
   753  	return &nopDecompressor{grpc.NewGZIPDecompressor()}
   754  }
   755  
// Type reports "nop", a decompressor name no codec is registered under.
func (d *nopDecompressor) Type() string {
	return "nop"
}
   759  
// configDial translates the test's client-side knobs into dial options
// and returns them together with the resolver scheme prefix
// ("passthrough:///" unless resolverScheme overrides it). It does not
// dial; callers pass the result to grpc.NewClient.
func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) {
	opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent))

	if te.clientCompression {
		// Exercises the deprecated WithCompressor/WithDecompressor options.
		opts = append(opts,
			grpc.WithCompressor(grpc.NewGZIPCompressor()),
			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	if te.clientUseCompression {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
	}
	if te.clientNopCompression {
		// Compressor/decompressor with an unregistered ("nop") type name.
		opts = append(opts,
			grpc.WithCompressor(newNopCompressor()),
			grpc.WithDecompressor(newNopDecompressor()),
		)
	}
	if te.unaryClientInt != nil {
		opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt))
	}
	if te.streamClientInt != nil {
		opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt))
	}
	if te.maxClientMsgSize != nil {
		opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize))
	}
	if te.maxClientReceiveMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize)))
	}
	if te.maxClientSendMsgSize != nil {
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize)))
	}
	if te.maxClientHeaderListSize != nil {
		opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize))
	}
	switch te.e.security {
	case "tls":
		creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com")
		if err != nil {
			te.t.Fatalf("Failed to load credentials: %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	case "empty":
		// Don't add any transport creds option.
	default:
		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
	}
	// TODO(bar) switch balancer case "pick_first".
	var scheme string
	if te.resolverScheme == "" {
		scheme = "passthrough:///"
	} else {
		scheme = te.resolverScheme + ":///"
	}
	if te.e.balancer != "" {
		opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, te.e.balancer)))
	}
	if te.clientInitialWindowSize > 0 {
		opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize))
	}
	if te.clientInitialConnWindowSize > 0 {
		opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize))
	}
	if te.perRPCCreds != nil {
		opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds))
	}
	if te.srvAddr == "" {
		// No server was started; use a placeholder target so client-only
		// tests can still build a ClientConn.
		te.srvAddr = "client.side.only.test"
	}
	opts = append(opts, te.customDialOptions...)
	return opts, scheme
}
   833  
   834  func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) {
   835  	if te.cc != nil {
   836  		return te.cc, nil
   837  	}
   838  	opts, scheme := te.configDial()
   839  	dw := &dialerWrapper{}
   840  	// overwrite the dialer before
   841  	opts = append(opts, grpc.WithDialer(dw.dialer))
   842  	var err error
   843  	te.cc, err = grpc.NewClient(scheme+te.srvAddr, opts...)
   844  	if err != nil {
   845  		te.t.Fatalf("NewClient(%q) = %v", scheme+te.srvAddr, err)
   846  	}
   847  	return te.cc, dw
   848  }
   849  
   850  func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn {
   851  	if te.cc != nil {
   852  		return te.cc
   853  	}
   854  	var scheme string
   855  	opts, scheme = te.configDial(opts...)
   856  	var err error
   857  	te.cc, err = grpc.NewClient(scheme+te.srvAddr, opts...)
   858  	if err != nil {
   859  		te.t.Fatalf("grpc.NewClient(%q) failed: %v", scheme+te.srvAddr, err)
   860  	}
   861  	te.cc.Connect()
   862  	return te.cc
   863  }
   864  
// declareLogNoise registers log phrases that are expected during this test so
// they are suppressed; the returned restore function is saved so tearDown can
// undo the suppression.
func (te *test) declareLogNoise(phrases ...string) {
	te.restoreLogs = declareLogNoise(te.t, phrases...)
}
   868  
   869  func (te *test) withServerTester(fn func(st *serverTester)) {
   870  	c, err := te.e.dialer(te.srvAddr, 10*time.Second)
   871  	if err != nil {
   872  		te.t.Fatal(err)
   873  	}
   874  	defer c.Close()
   875  	if te.e.security == "tls" {
   876  		c = tls.Client(c, &tls.Config{
   877  			InsecureSkipVerify: true,
   878  			NextProtos:         []string{http2.NextProtoTLS},
   879  		})
   880  	}
   881  	st := newServerTesterFromConn(te.t, c)
   882  	st.greet()
   883  	fn(st)
   884  }
   885  
// lazyConn wraps a net.Conn; while beLazy is set (written atomically) every
// Write stalls for a second, simulating a badly delayed connection.
type lazyConn struct {
	net.Conn
	beLazy int32 // accessed with sync/atomic; nonzero enables the write delay
}
   890  
   891  // possible conn closed errors.
   892  const possibleConnResetMsg = "connection reset by peer"
   893  const possibleEOFMsg = "error reading from server: EOF"
   894  
   895  // isConnClosedErr checks the error msg for possible conn closed messages. There
   896  // is a raceyness in the timing of when TCP packets are sent from client to
   897  // server, and when we tell the server to stop, so we need to check for both of
   898  // these possible error messages:
   899  //  1. If the call to ss.S.Stop() causes the server's sockets to close while
   900  //     there's still in-fight data from the client on the TCP connection, then
   901  //     the kernel can send an RST back to the client (also see
   902  //     https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c).
   903  //     Note that while this condition is expected to be rare due to the
   904  //     test httpServer start synchronization, in theory it should be possible,
   905  //     e.g. if the client sends a BDP ping at the right time.
   906  //  2. If, for example, the call to ss.S.Stop() happens after the RPC headers
   907  //     have been received at the server, then the TCP connection can shutdown
   908  //     gracefully when the server's socket closes.
   909  //  3. If there is an actual io.EOF received because the client stopped the stream.
   910  func isConnClosedErr(err error) bool {
   911  	errContainsConnResetMsg := strings.Contains(err.Error(), possibleConnResetMsg)
   912  	errContainsEOFMsg := strings.Contains(err.Error(), possibleEOFMsg)
   913  
   914  	return errContainsConnResetMsg || errContainsEOFMsg || err == io.EOF
   915  }
   916  
   917  func (l *lazyConn) Write(b []byte) (int, error) {
   918  	if atomic.LoadInt32(&(l.beLazy)) == 1 {
   919  		time.Sleep(time.Second)
   920  	}
   921  	return l.Conn.Write(b)
   922  }
   923  
// TestContextDeadlineNotIgnored verifies that an RPC fails with
// DeadlineExceeded promptly, rather than hanging, when the underlying
// connection becomes artificially slow after a first successful call.
func (s) TestContextDeadlineNotIgnored(t *testing.T) {
	e := noBalancerEnv
	var lc *lazyConn
	// Wrap every dialed conn in a lazyConn so the test can slow down writes on
	// demand later.
	e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) {
		conn, err := net.DialTimeout(network, addr, timeout)
		if err != nil {
			return nil, err
		}
		lc = &lazyConn{Conn: conn}
		return lc, nil
	}

	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	// A first RPC with a generous deadline must succeed; it also guarantees lc
	// has been populated by the dialer before lazy mode is enabled.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	cancel()
	// Make all subsequent writes sleep for a second, then issue an RPC with a
	// short deadline: it should fail with DeadlineExceeded...
	atomic.StoreInt32(&(lc.beLazy), 1)
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer cancel()
	t1 := time.Now()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err)
	}
	// ...and it must do so without stalling far beyond the deadline.
	if time.Since(t1) > 2*time.Second {
		t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline")
	}
}
   958  
   959  func (s) TestTimeoutOnDeadServer(t *testing.T) {
   960  	for _, e := range listTestEnv() {
   961  		testTimeoutOnDeadServer(t, e)
   962  	}
   963  }
   964  
// testTimeoutOnDeadServer verifies that once the server has been stopped and
// the client has noticed, a wait-for-ready RPC blocks until its deadline
// expires and then fails with DeadlineExceeded rather than hanging forever.
func testTimeoutOnDeadServer(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Sanity check: the RPC succeeds while the server is up.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Wait for the client to report READY, stop the server, then wait for the
	// client to notice the connection is gone.
	testutils.AwaitState(ctx, t, cc, connectivity.Ready)
	te.srv.Stop()
	testutils.AwaitNotState(ctx, t, cc, connectivity.Ready)
	// With no server to connect to, a short-deadline wait-for-ready RPC should
	// end in DeadlineExceeded.
	ctx, cancel = context.WithTimeout(ctx, defaultTestShortTimeout)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded)
	}
	awaitNewConnLogOutput()
}
   990  
   991  func (s) TestServerGracefulStopIdempotent(t *testing.T) {
   992  	for _, e := range listTestEnv() {
   993  		if e.name == "handler-tls" {
   994  			continue
   995  		}
   996  		testServerGracefulStopIdempotent(t, e)
   997  	}
   998  }
   999  
  1000  func testServerGracefulStopIdempotent(t *testing.T, e env) {
  1001  	te := newTest(t, e)
  1002  	te.userAgent = testAppUA
  1003  	te.startServer(&testServer{security: e.security})
  1004  	defer te.tearDown()
  1005  
  1006  	for i := 0; i < 3; i++ {
  1007  		te.srv.GracefulStop()
  1008  	}
  1009  }
  1010  
// TestDetailedConnectionCloseErrorPropagatesToRPCError verifies that when the
// server's connection is killed while a stream is in flight, the RPC error
// surfaced to the client describes the underlying connection failure.
func (s) TestDetailedConnectionCloseErrorPropagatesToRPCError(t *testing.T) {
	rpcStartedOnServer := make(chan struct{})
	rpcDoneOnClient := make(chan struct{})
	defer close(rpcDoneOnClient)
	// The handler signals when it starts and then blocks until the client side
	// of the test finishes, keeping the stream open across the server Stop().
	ss := &stubserver.StubServer{
		FullDuplexCallF: func(testgrpc.TestService_FullDuplexCallServer) error {
			close(rpcStartedOnServer)
			<-rpcDoneOnClient
			return status.Error(codes.Internal, "arbitrary status")
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// Start an RPC. Then, while the RPC is still being accepted or handled at
	// the server, abruptly stop the server, killing the connection. The RPC
	// error message should include details about the specific connection error
	// that was encountered.
	stream, err := ss.Client.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall = _, %v, want _, <nil>", ss.Client, err)
	}
	// Block until the RPC has been started on the server. This ensures that the
	// ClientConn will find a healthy connection for the RPC to go out on
	// initially, and that the TCP connection will shut down strictly after the
	// RPC has been started on it.
	<-rpcStartedOnServer
	ss.S.Stop()
	// The precise behavior of this test is subject to raceyness around the
	// timing of when TCP packets are sent from client to server, and when we
	// tell the server to stop, so we need to account for both possible error
	// messages.
	if _, err := stream.Recv(); err == io.EOF || !isConnClosedErr(err) {
		t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg)
	}
}
  1052  
  1053  func (s) TestFailFast(t *testing.T) {
  1054  	for _, e := range listTestEnv() {
  1055  		testFailFast(t, e)
  1056  	}
  1057  }
  1058  
// testFailFast verifies that after the server is torn down and the client has
// noticed, fail-fast (the default) RPCs fail with Unavailable instead of
// waiting for a connection.
func testFailFast(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Sanity check: the RPC succeeds while the server is up.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	// Stop the server and tear down all the existing connections.
	te.srv.Stop()
	// Loop until the server teardown is propagated to the client.
	for {
		if err := ctx.Err(); err != nil {
			t.Fatalf("EmptyCall did not return UNAVAILABLE before timeout")
		}
		_, err := tc.EmptyCall(ctx, &testpb.Empty{})
		if status.Code(err) == codes.Unavailable {
			break
		}
		t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err)
		time.Sleep(10 * time.Millisecond)
	}
	// The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}
	// Streaming RPCs should fail fast in the same way.
	if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable)
	}

	awaitNewConnLogOutput()
}
  1096  
  1097  func testServiceConfigSetup(t *testing.T, e env) *test {
  1098  	te := newTest(t, e)
  1099  	te.userAgent = testAppUA
  1100  	te.declareLogNoise(
  1101  		"Failed to dial : context canceled; please retry.",
  1102  	)
  1103  	return te
  1104  }
  1105  
  1106  func newInt(b int) (a *int) {
  1107  	return &b
  1108  }
  1109  
// TestGetMethodConfig verifies that a method-specific service config entry
// takes precedence over the service-level default, and that a config update
// delivered via the resolver is applied to subsequent RPCs.
func (s) TestGetMethodConfig(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	// The EmptyCall-specific entry (waitForReady with a 1ms timeout) should
	// win over the service-wide default (waitForReady=false).
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	tc := testgrpc.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	// Update the config so that EmptyCall now only matches the service-level
	// default entry (waitForReady=false, no timeout).
	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "UnaryCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        },
        {
            "name": [
                {
                    "service": "grpc.testing.TestService"
                }
            ],
            "waitForReady": false
        }
    ]
}`)})

	// Make sure service config has been processed by grpc.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become fail-fast.
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}
  1196  
// TestServiceConfigWaitForReady verifies the interaction between the per-call
// WaitForReady option and the service config's waitForReady setting: an
// explicit call option wins over the config, and a config update is honored
// by calls that don't set the option.
func (s) TestServiceConfigWaitForReady(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	// Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": false,
            "timeout": ".001s"
        }
    ]
}`)})

	tc := testgrpc.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	var err error
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}

	// Generate a service config update.
	// Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady {
			break
		}
		time.Sleep(time.Millisecond)
	}
	// The following RPCs are expected to become non-fail-fast ones with 1ms deadline.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
  1285  
// TestServiceConfigTimeout verifies that the effective RPC deadline is the
// minimum of the caller's context deadline and the service config's timeout,
// in both directions (short context/long config, long context/short config).
func (s) TestServiceConfigTimeout(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	defer te.tearDown()
	r := manual.NewBuilderWithScheme("whatever")

	// Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	addrs := []resolver.Address{{Addr: te.srvAddr}}
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": "3600s"
        }
    ]
}`)})

	tc := testgrpc.NewTestServiceClient(cc)

	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// The following RPCs are expected to become non-fail-fast ones with 1ns deadline.
	var err error
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}
	cancel()

	ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
	cancel()

	// Generate a service config update.
	// Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds.
	r.UpdateState(resolver.State{
		Addresses: addrs,
		ServiceConfig: parseServiceConfig(t, r, `{
    "methodConfig": [
        {
            "name": [
                {
                    "service": "grpc.testing.TestService",
                    "method": "EmptyCall"
                },
                {
                    "service": "grpc.testing.TestService",
                    "method": "FullDuplexCall"
                }
            ],
            "waitForReady": true,
            "timeout": ".000000001s"
        }
    ]
}`)})

	// Wait for the new service config to take effect.
	for {
		if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// Even with a long context deadline, the 1ns config timeout should force
	// DeadlineExceeded.
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded)
	}
}
  1381  
  1382  func (s) TestServiceConfigMaxMsgSize(t *testing.T) {
  1383  	e := tcpClearRREnv
  1384  	r := manual.NewBuilderWithScheme("whatever")
  1385  
  1386  	// Setting up values and objects shared across all test cases.
  1387  	const smallSize = 1
  1388  	const largeSize = 1024
  1389  	const extraLargeSize = 2048
  1390  
  1391  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  1392  	if err != nil {
  1393  		t.Fatal(err)
  1394  	}
  1395  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  1396  	if err != nil {
  1397  		t.Fatal(err)
  1398  	}
  1399  	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
  1400  	if err != nil {
  1401  		t.Fatal(err)
  1402  	}
  1403  
  1404  	// Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
  1405  	te1 := testServiceConfigSetup(t, e)
  1406  	defer te1.tearDown()
  1407  
  1408  	te1.resolverScheme = r.Scheme()
  1409  	te1.startServer(&testServer{security: e.security})
  1410  	cc1 := te1.clientConn(grpc.WithResolvers(r))
  1411  
  1412  	addrs := []resolver.Address{{Addr: te1.srvAddr}}
  1413  	sc := parseServiceConfig(t, r, `{
  1414      "methodConfig": [
  1415          {
  1416              "name": [
  1417                  {
  1418                      "service": "grpc.testing.TestService",
  1419                      "method": "UnaryCall"
  1420                  },
  1421                  {
  1422                      "service": "grpc.testing.TestService",
  1423                      "method": "FullDuplexCall"
  1424                  }
  1425              ],
  1426              "maxRequestMessageBytes": 2048,
  1427              "maxResponseMessageBytes": 2048
  1428          }
  1429      ]
  1430  }`)
  1431  	r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc})
  1432  	tc := testgrpc.NewTestServiceClient(cc1)
  1433  
  1434  	req := &testpb.SimpleRequest{
  1435  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  1436  		ResponseSize: int32(extraLargeSize),
  1437  		Payload:      smallPayload,
  1438  	}
  1439  
  1440  	for {
  1441  		if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1442  			break
  1443  		}
  1444  		time.Sleep(time.Millisecond)
  1445  	}
  1446  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1447  	defer cancel()
  1448  	// Test for unary RPC recv.
  1449  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
  1450  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1451  	}
  1452  
  1453  	// Test for unary RPC send.
  1454  	req.Payload = extraLargePayload
  1455  	req.ResponseSize = int32(smallSize)
  1456  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1457  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1458  	}
  1459  
  1460  	// Test for streaming RPC recv.
  1461  	respParam := []*testpb.ResponseParameters{
  1462  		{
  1463  			Size: int32(extraLargeSize),
  1464  		},
  1465  	}
  1466  	sreq := &testpb.StreamingOutputCallRequest{
  1467  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1468  		ResponseParameters: respParam,
  1469  		Payload:            smallPayload,
  1470  	}
  1471  	stream, err := tc.FullDuplexCall(te1.ctx)
  1472  	if err != nil {
  1473  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1474  	}
  1475  	if err = stream.Send(sreq); err != nil {
  1476  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1477  	}
  1478  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1479  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1480  	}
  1481  
  1482  	// Test for streaming RPC send.
  1483  	respParam[0].Size = int32(smallSize)
  1484  	sreq.Payload = extraLargePayload
  1485  	stream, err = tc.FullDuplexCall(te1.ctx)
  1486  	if err != nil {
  1487  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1488  	}
  1489  	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1490  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1491  	}
  1492  
  1493  	// Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
  1494  	te2 := testServiceConfigSetup(t, e)
  1495  	te2.resolverScheme = r.Scheme()
  1496  	te2.maxClientReceiveMsgSize = newInt(1024)
  1497  	te2.maxClientSendMsgSize = newInt(1024)
  1498  
  1499  	te2.startServer(&testServer{security: e.security})
  1500  	defer te2.tearDown()
  1501  	cc2 := te2.clientConn(grpc.WithResolvers(r))
  1502  	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc})
  1503  	tc = testgrpc.NewTestServiceClient(cc2)
  1504  
  1505  	for {
  1506  		if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1507  			break
  1508  		}
  1509  		time.Sleep(time.Millisecond)
  1510  	}
  1511  
  1512  	// Test for unary RPC recv.
  1513  	req.Payload = smallPayload
  1514  	req.ResponseSize = int32(largeSize)
  1515  
  1516  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted {
  1517  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1518  	}
  1519  
  1520  	// Test for unary RPC send.
  1521  	req.Payload = largePayload
  1522  	req.ResponseSize = int32(smallSize)
  1523  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1524  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1525  	}
  1526  
  1527  	// Test for streaming RPC recv.
  1528  	stream, err = tc.FullDuplexCall(te2.ctx)
  1529  	respParam[0].Size = int32(largeSize)
  1530  	sreq.Payload = smallPayload
  1531  	if err != nil {
  1532  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1533  	}
  1534  	if err = stream.Send(sreq); err != nil {
  1535  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1536  	}
  1537  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1538  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1539  	}
  1540  
  1541  	// Test for streaming RPC send.
  1542  	respParam[0].Size = int32(smallSize)
  1543  	sreq.Payload = largePayload
  1544  	stream, err = tc.FullDuplexCall(te2.ctx)
  1545  	if err != nil {
  1546  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1547  	}
  1548  	if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1549  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1550  	}
  1551  
  1552  	// Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv).
  1553  	te3 := testServiceConfigSetup(t, e)
  1554  	te3.resolverScheme = r.Scheme()
  1555  	te3.maxClientReceiveMsgSize = newInt(4096)
  1556  	te3.maxClientSendMsgSize = newInt(4096)
  1557  
  1558  	te3.startServer(&testServer{security: e.security})
  1559  	defer te3.tearDown()
  1560  
  1561  	cc3 := te3.clientConn(grpc.WithResolvers(r))
  1562  	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc})
  1563  	tc = testgrpc.NewTestServiceClient(cc3)
  1564  
  1565  	for {
  1566  		if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil {
  1567  			break
  1568  		}
  1569  		time.Sleep(time.Millisecond)
  1570  	}
  1571  
  1572  	// Test for unary RPC recv.
  1573  	req.Payload = smallPayload
  1574  	req.ResponseSize = int32(largeSize)
  1575  
  1576  	if _, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); err != nil {
  1577  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  1578  	}
  1579  
  1580  	req.ResponseSize = int32(extraLargeSize)
  1581  	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1582  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1583  	}
  1584  
  1585  	// Test for unary RPC send.
  1586  	req.Payload = largePayload
  1587  	req.ResponseSize = int32(smallSize)
  1588  	if _, err := tc.UnaryCall(ctx, req); err != nil {
  1589  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want <nil>", err)
  1590  	}
  1591  
  1592  	req.Payload = extraLargePayload
  1593  	if _, err = tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
  1594  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  1595  	}
  1596  
  1597  	// Test for streaming RPC recv.
  1598  	stream, err = tc.FullDuplexCall(te3.ctx)
  1599  	if err != nil {
  1600  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1601  	}
  1602  	respParam[0].Size = int32(largeSize)
  1603  	sreq.Payload = smallPayload
  1604  
  1605  	if err = stream.Send(sreq); err != nil {
  1606  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1607  	}
  1608  	if _, err = stream.Recv(); err != nil {
  1609  		t.Fatalf("%v.Recv() = _, %v, want <nil>", stream, err)
  1610  	}
  1611  
  1612  	respParam[0].Size = int32(extraLargeSize)
  1613  
  1614  	if err = stream.Send(sreq); err != nil {
  1615  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1616  	}
  1617  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  1618  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  1619  	}
  1620  
  1621  	// Test for streaming RPC send.
  1622  	respParam[0].Size = int32(smallSize)
  1623  	sreq.Payload = largePayload
  1624  	stream, err = tc.FullDuplexCall(te3.ctx)
  1625  	if err != nil {
  1626  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1627  	}
  1628  	if err := stream.Send(sreq); err != nil {
  1629  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  1630  	}
  1631  	sreq.Payload = extraLargePayload
  1632  	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
  1633  		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
  1634  	}
  1635  }
  1636  
  1637  // Reading from a streaming RPC may fail with context canceled if timeout was
  1638  // set by service config (https://github.com/grpc/grpc-go/issues/1818). This
  1639  // test makes sure read from streaming RPC doesn't fail in this case.
func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) {
	te := testServiceConfigSetup(t, tcpClearRREnv)
	te.startServer(&testServer{security: tcpClearRREnv.security})
	defer te.tearDown()
	// A manual resolver lets the test install a service config directly.
	r := manual.NewBuilderWithScheme("whatever")

	te.resolverScheme = r.Scheme()
	cc := te.clientConn(grpc.WithResolvers(r))
	tc := testgrpc.NewTestServiceClient(cc)

	// The service config sets a 10s timeout (and waitForReady) on
	// FullDuplexCall; a service-config timeout is what used to cause the
	// spurious "context canceled" on Recv (grpc-go issue #1818).
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: te.srvAddr}},
		ServiceConfig: parseServiceConfig(t, r, `{
	    "methodConfig": [
	        {
	            "name": [
	                {
	                    "service": "grpc.testing.TestService",
	                    "method": "FullDuplexCall"
	                }
	            ],
	            "waitForReady": true,
	            "timeout": "10s"
	        }
	    ]
	}`)})
	// Make sure service config has been processed by grpc.
	for {
		if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil {
			break
		}
		time.Sleep(time.Millisecond)
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want <nil>", err)
	}

	// Zero-length payload / zero-size response: the content doesn't matter,
	// only that one message round-trips before the stream is closed.
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0)
	if err != nil {
		t.Fatalf("failed to newPayload: %v", err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{{Size: 0}},
		Payload:            payload,
	}
	if err := stream.Send(req); err != nil {
		t.Fatalf("stream.Send(%v) = %v, want <nil>", req, err)
	}
	stream.CloseSend()
	// Sleep 1 second before recv to make sure the final status is received
	// before the recv.
	time.Sleep(time.Second)
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("stream.Recv = _, %v, want _, <nil>", err)
	}
	// Keep reading to drain the stream.
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
}
  1707  
  1708  func (s) TestPreloaderClientSend(t *testing.T) {
  1709  	for _, e := range listTestEnv() {
  1710  		testPreloaderClientSend(t, e)
  1711  	}
  1712  }
  1713  
  1714  func testPreloaderClientSend(t *testing.T, e env) {
  1715  	te := newTest(t, e)
  1716  	te.userAgent = testAppUA
  1717  	te.declareLogNoise(
  1718  		"Failed to dial : context canceled; please retry.",
  1719  	)
  1720  	te.startServer(&testServer{security: e.security})
  1721  
  1722  	defer te.tearDown()
  1723  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  1724  
  1725  	// Test for streaming RPC recv.
  1726  	// Set context for send with proper RPC Information
  1727  	stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip"))
  1728  	if err != nil {
  1729  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  1730  	}
  1731  	var index int
  1732  	for index < len(reqSizes) {
  1733  		respParam := []*testpb.ResponseParameters{
  1734  			{
  1735  				Size: int32(respSizes[index]),
  1736  			},
  1737  		}
  1738  
  1739  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  1740  		if err != nil {
  1741  			t.Fatal(err)
  1742  		}
  1743  
  1744  		req := &testpb.StreamingOutputCallRequest{
  1745  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  1746  			ResponseParameters: respParam,
  1747  			Payload:            payload,
  1748  		}
  1749  		preparedMsg := &grpc.PreparedMsg{}
  1750  		err = preparedMsg.Encode(stream, req)
  1751  		if err != nil {
  1752  			t.Fatalf("PrepareMsg failed for size %d : %v", reqSizes[index], err)
  1753  		}
  1754  		if err := stream.SendMsg(preparedMsg); err != nil {
  1755  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  1756  		}
  1757  		reply, err := stream.Recv()
  1758  		if err != nil {
  1759  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  1760  		}
  1761  		pt := reply.GetPayload().GetType()
  1762  		if pt != testpb.PayloadType_COMPRESSABLE {
  1763  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  1764  		}
  1765  		size := len(reply.GetPayload().GetBody())
  1766  		if size != int(respSizes[index]) {
  1767  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  1768  		}
  1769  		index++
  1770  	}
  1771  	if err := stream.CloseSend(); err != nil {
  1772  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  1773  	}
  1774  	if _, err := stream.Recv(); err != io.EOF {
  1775  		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
  1776  	}
  1777  }
  1778  
  1779  func (s) TestPreloaderSenderSend(t *testing.T) {
  1780  	ss := &stubserver.StubServer{
  1781  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  1782  			for i := 0; i < 10; i++ {
  1783  				preparedMsg := &grpc.PreparedMsg{}
  1784  				err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{
  1785  					Payload: &testpb.Payload{
  1786  						Body: []byte{'0' + uint8(i)},
  1787  					},
  1788  				})
  1789  				if err != nil {
  1790  					return err
  1791  				}
  1792  				stream.SendMsg(preparedMsg)
  1793  			}
  1794  			return nil
  1795  		},
  1796  	}
  1797  	if err := ss.Start(nil); err != nil {
  1798  		t.Fatalf("Error starting endpoint server: %v", err)
  1799  	}
  1800  	defer ss.Stop()
  1801  
  1802  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1803  	defer cancel()
  1804  
  1805  	stream, err := ss.Client.FullDuplexCall(ctx)
  1806  	if err != nil {
  1807  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
  1808  	}
  1809  
  1810  	var ngot int
  1811  	var buf bytes.Buffer
  1812  	for {
  1813  		reply, err := stream.Recv()
  1814  		if err == io.EOF {
  1815  			break
  1816  		}
  1817  		if err != nil {
  1818  			t.Fatal(err)
  1819  		}
  1820  		ngot++
  1821  		if buf.Len() > 0 {
  1822  			buf.WriteByte(',')
  1823  		}
  1824  		buf.Write(reply.GetPayload().GetBody())
  1825  	}
  1826  	if want := 10; ngot != want {
  1827  		t.Errorf("Got %d replies, want %d", ngot, want)
  1828  	}
  1829  	if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  1830  		t.Errorf("Got replies %q; want %q", got, want)
  1831  	}
  1832  }
  1833  
  1834  func (s) TestMaxMsgSizeClientDefault(t *testing.T) {
  1835  	for _, e := range listTestEnv() {
  1836  		testMaxMsgSizeClientDefault(t, e)
  1837  	}
  1838  }
  1839  
// testMaxMsgSizeClientDefault checks that, with no explicit client limits
// configured, a 4MB response is rejected with ResourceExhausted on both unary
// and full-duplex streaming RPCs (i.e. the client's default receive limit is
// in effect).
func testMaxMsgSizeClientDefault(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.declareLogNoise(
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	const smallSize = 1
	const largeSize = 4 * 1024 * 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	// Tiny request, 4MB response: only the receive side should trip.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC recv.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC recv: the send succeeds, but reading the 4MB
	// response must fail with ResourceExhausted.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  1893  
  1894  func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
  1895  	for _, e := range listTestEnv() {
  1896  		testMaxMsgSizeClientAPI(t, e)
  1897  	}
  1898  }
  1899  
// testMaxMsgSizeClientAPI verifies the client-side send/receive limit options:
// with both client limits set to 1024, a message carrying a 1024-byte payload
// is rejected with ResourceExhausted in each direction, for unary and
// streaming RPCs alike. The server's send limit is raised so only the client
// limits can trigger the failures.
func testMaxMsgSizeClientAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// To avoid error on server side.
	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
	te.maxClientReceiveMsgSize = newInt(1024)
	te.maxClientSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	// largeSize equals the configured limit; the encoded message (payload
	// plus proto overhead) is expected to exceed it — the assertions below
	// rely on that.
	const smallSize = 1
	const largeSize = 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC recv.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC send.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC recv.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC send. A fresh stream is used because the failed
	// Recv above terminates the previous one.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}
  1980  
  1981  func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
  1982  	for _, e := range listTestEnv() {
  1983  		testMaxMsgSizeServerAPI(t, e)
  1984  	}
  1985  }
  1986  
// testMaxMsgSizeServerAPI verifies the server-side send/receive limit options:
// with both server limits set to 1024, a message carrying a 1024-byte payload
// is rejected with ResourceExhausted in each direction (the error surfaces on
// the client), for unary and streaming RPCs alike.
func testMaxMsgSizeServerAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.maxServerReceiveMsgSize = newInt(1024)
	te.maxServerSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})

	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	// largeSize equals the configured limit; the encoded message (payload
	// plus proto overhead) is expected to exceed it — the assertions below
	// rely on that.
	const smallSize = 1
	const largeSize = 1024
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Test for unary RPC send. Note: "send"/"recv" here are from the
	// server's perspective — this large *response* exceeds the server's send
	// limit.
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Test for unary RPC recv: the large *request* exceeds the server's
	// receive limit.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{
		{
			Size: int32(largeSize),
		},
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}

	// Test for streaming RPC send.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test for streaming RPC recv. A fresh stream is used because the failed
	// Recv above terminates the previous one.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  2068  
  2069  func (s) TestTap(t *testing.T) {
  2070  	for _, e := range listTestEnv() {
  2071  		if e.name == "handler-tls" {
  2072  			continue
  2073  		}
  2074  		testTap(t, e)
  2075  	}
  2076  }
  2077  
// myTap is a server transport tap handle used by testTap: it counts
// EmptyCall invocations and injects errors for selected methods (see handle).
type myTap struct {
	// cnt counts EmptyCall RPCs seen by the tap. Accessed without
	// synchronization; the test issues RPCs sequentially.
	cnt int
}
  2081  
  2082  func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
  2083  	if info != nil {
  2084  		switch info.FullMethodName {
  2085  		case "/grpc.testing.TestService/EmptyCall":
  2086  			t.cnt++
  2087  
  2088  			if vals := info.Header.Get("return-error"); len(vals) > 0 && vals[0] == "true" {
  2089  				return nil, status.Errorf(codes.Unknown, "tap error")
  2090  			}
  2091  		case "/grpc.testing.TestService/UnaryCall":
  2092  			return nil, fmt.Errorf("tap error")
  2093  		case "/grpc.testing.TestService/FullDuplexCall":
  2094  			return nil, status.Errorf(codes.FailedPrecondition, "test custom error")
  2095  		}
  2096  	}
  2097  	return ctx, nil
  2098  }
  2099  
// testTap installs a myTap handle on the server and verifies, per the tap's
// behavior in handle:
//   - EmptyCall is counted on every invocation, succeeds without the
//     "return-error: true" header, and fails with codes.Unknown with it;
//   - UnaryCall is rejected by the tap and surfaces as PermissionDenied;
//   - FullDuplexCall is rejected with the tap's custom FailedPrecondition
//     status, observed on the first Recv.
func testTap(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	ttap := &myTap{}
	te.tapHandle = ttap.handle
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	// No header: call succeeds and the tap counts it.
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	if ttap.cnt != 1 {
		t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt)
	}

	// "return-error: false" header: still succeeds, still counted.
	if _, err := tc.EmptyCall(metadata.AppendToOutgoingContext(ctx, "return-error", "false"), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	if ttap.cnt != 2 {
		t.Fatalf("Get the count in ttap %d, want 2", ttap.cnt)
	}

	// "return-error: true" header: the tap rejects the call with Unknown,
	// and the count still increments (counting happens before the check).
	if _, err := tc.EmptyCall(metadata.AppendToOutgoingContext(ctx, "return-error", "true"), &testpb.Empty{}); status.Code(err) != codes.Unknown {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unknown)
	}
	if ttap.cnt != 3 {
		t.Fatalf("Get the count in ttap %d, want 3", ttap.cnt)
	}

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
	if err != nil {
		t.Fatal(err)
	}

	// UnaryCall: the tap returns a plain (non-status) error, which the
	// server surfaces as PermissionDenied.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 45,
		Payload:      payload,
	}
	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.PermissionDenied {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied)
	}
	// FullDuplexCall: stream creation succeeds; the tap's custom status is
	// observed on the first Recv.
	str, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("Unexpected error creating stream: %v", err)
	}
	if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition {
		t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition)
	}
}
  2155  
  2156  func (s) TestEmptyUnaryWithUserAgent(t *testing.T) {
  2157  	for _, e := range listTestEnv() {
  2158  		testEmptyUnaryWithUserAgent(t, e)
  2159  	}
  2160  }
  2161  
  2162  func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
  2163  	te := newTest(t, e)
  2164  	te.userAgent = testAppUA
  2165  	te.startServer(&testServer{security: e.security})
  2166  	defer te.tearDown()
  2167  
  2168  	cc := te.clientConn()
  2169  	tc := testgrpc.NewTestServiceClient(cc)
  2170  	var header metadata.MD
  2171  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2172  	defer cancel()
  2173  	reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header))
  2174  	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
  2175  		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
  2176  	}
  2177  	if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) {
  2178  		t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA)
  2179  	}
  2180  
  2181  	te.srv.Stop()
  2182  }
  2183  
  2184  func (s) TestFailedEmptyUnary(t *testing.T) {
  2185  	for _, e := range listTestEnv() {
  2186  		if e.name == "handler-tls" {
  2187  			// This test covers status details, but
  2188  			// Grpc-Status-Details-Bin is not support in handler_server.
  2189  			continue
  2190  		}
  2191  		testFailedEmptyUnary(t, e)
  2192  	}
  2193  }
  2194  
  2195  func testFailedEmptyUnary(t *testing.T, e env) {
  2196  	te := newTest(t, e)
  2197  	te.userAgent = failAppUA
  2198  	te.startServer(&testServer{security: e.security})
  2199  	defer te.tearDown()
  2200  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2201  
  2202  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2203  	defer cancel()
  2204  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2205  	wantErr := detailedError
  2206  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) {
  2207  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr)
  2208  	}
  2209  }
  2210  
  2211  func (s) TestLargeUnary(t *testing.T) {
  2212  	for _, e := range listTestEnv() {
  2213  		testLargeUnary(t, e)
  2214  	}
  2215  }
  2216  
  2217  func testLargeUnary(t *testing.T, e env) {
  2218  	te := newTest(t, e)
  2219  	te.startServer(&testServer{security: e.security})
  2220  	defer te.tearDown()
  2221  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2222  
  2223  	const argSize = 271828
  2224  	const respSize = 314159
  2225  
  2226  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2227  	if err != nil {
  2228  		t.Fatal(err)
  2229  	}
  2230  
  2231  	req := &testpb.SimpleRequest{
  2232  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2233  		ResponseSize: respSize,
  2234  		Payload:      payload,
  2235  	}
  2236  
  2237  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2238  	defer cancel()
  2239  	reply, err := tc.UnaryCall(ctx, req)
  2240  	if err != nil {
  2241  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
  2242  	}
  2243  	pt := reply.GetPayload().GetType()
  2244  	ps := len(reply.GetPayload().GetBody())
  2245  	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
  2246  		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
  2247  	}
  2248  }
  2249  
  2250  // Test backward-compatibility API for setting msg size limit.
  2251  func (s) TestExceedMsgLimit(t *testing.T) {
  2252  	for _, e := range listTestEnv() {
  2253  		testExceedMsgLimit(t, e)
  2254  	}
  2255  }
  2256  
// testExceedMsgLimit exercises the legacy maxServerMsgSize/maxClientMsgSize
// knobs: with both set to 1024, a (1024+1)-byte payload is rejected with
// ResourceExhausted in all four directions (unary/streaming x send/recv).
func testExceedMsgLimit(t *testing.T, e env) {
	te := newTest(t, e)
	maxMsgSize := 1024
	te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	// One byte over the limit guarantees rejection regardless of framing.
	largeSize := int32(maxMsgSize + 1)
	const smallSize = 1

	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure the server cannot receive a unary RPC of largeSize.
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: smallSize,
		Payload:      largePayload,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
	// Make sure the client cannot receive a unary RPC of largeSize.
	req.ResponseSize = largeSize
	req.Payload = smallPayload
	if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Make sure the server cannot receive a streaming RPC of largeSize.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}

	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            largePayload,
	}
	// The send itself succeeds; the server's rejection surfaces on Recv.
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Test on client side for streaming RPC. A fresh stream is needed since
	// the previous one was terminated by the failed Recv.
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam[0].Size = largeSize
	sreq.Payload = smallPayload
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}
  2333  
  2334  func (s) TestPeerClientSide(t *testing.T) {
  2335  	for _, e := range listTestEnv() {
  2336  		testPeerClientSide(t, e)
  2337  	}
  2338  }
  2339  
  2340  func testPeerClientSide(t *testing.T, e env) {
  2341  	te := newTest(t, e)
  2342  	te.userAgent = testAppUA
  2343  	te.startServer(&testServer{security: e.security})
  2344  	defer te.tearDown()
  2345  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2346  	peer := new(peer.Peer)
  2347  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2348  	defer cancel()
  2349  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil {
  2350  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  2351  	}
  2352  	pa := peer.Addr.String()
  2353  	if e.network == "unix" {
  2354  		if pa != te.srvAddr {
  2355  			t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
  2356  		}
  2357  		return
  2358  	}
  2359  	_, pp, err := net.SplitHostPort(pa)
  2360  	if err != nil {
  2361  		t.Fatalf("Failed to parse address from peer.")
  2362  	}
  2363  	_, sp, err := net.SplitHostPort(te.srvAddr)
  2364  	if err != nil {
  2365  		t.Fatalf("Failed to parse address of test server.")
  2366  	}
  2367  	if pp != sp {
  2368  		t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
  2369  	}
  2370  }
  2371  
  2372  // TestPeerNegative tests that if call fails setting peer
  2373  // doesn't cause a segmentation fault.
  2374  // issue#1141 https://github.com/grpc/grpc-go/issues/1141
  2375  func (s) TestPeerNegative(t *testing.T) {
  2376  	for _, e := range listTestEnv() {
  2377  		testPeerNegative(t, e)
  2378  	}
  2379  }
  2380  
  2381  func testPeerNegative(t *testing.T, e env) {
  2382  	te := newTest(t, e)
  2383  	te.startServer(&testServer{security: e.security})
  2384  	defer te.tearDown()
  2385  
  2386  	cc := te.clientConn()
  2387  	tc := testgrpc.NewTestServiceClient(cc)
  2388  	peer := new(peer.Peer)
  2389  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2390  	cancel()
  2391  	tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer))
  2392  }
  2393  
  2394  func (s) TestPeerFailedRPC(t *testing.T) {
  2395  	for _, e := range listTestEnv() {
  2396  		testPeerFailedRPC(t, e)
  2397  	}
  2398  }
  2399  
  2400  func testPeerFailedRPC(t *testing.T, e env) {
  2401  	te := newTest(t, e)
  2402  	te.maxServerReceiveMsgSize = newInt(1 * 1024)
  2403  	te.startServer(&testServer{security: e.security})
  2404  
  2405  	defer te.tearDown()
  2406  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2407  
  2408  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2409  	defer cancel()
  2410  	// first make a successful request to the server
  2411  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  2412  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, <nil>", err)
  2413  	}
  2414  
  2415  	// make a second request that will be rejected by the server
  2416  	const largeSize = 5 * 1024
  2417  	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
  2418  	if err != nil {
  2419  		t.Fatal(err)
  2420  	}
  2421  	req := &testpb.SimpleRequest{
  2422  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2423  		Payload:      largePayload,
  2424  	}
  2425  
  2426  	peer := new(peer.Peer)
  2427  	if _, err := tc.UnaryCall(ctx, req, grpc.Peer(peer)); err == nil || status.Code(err) != codes.ResourceExhausted {
  2428  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
  2429  	} else {
  2430  		pa := peer.Addr.String()
  2431  		if e.network == "unix" {
  2432  			if pa != te.srvAddr {
  2433  				t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr)
  2434  			}
  2435  			return
  2436  		}
  2437  		_, pp, err := net.SplitHostPort(pa)
  2438  		if err != nil {
  2439  			t.Fatalf("Failed to parse address from peer.")
  2440  		}
  2441  		_, sp, err := net.SplitHostPort(te.srvAddr)
  2442  		if err != nil {
  2443  			t.Fatalf("Failed to parse address of test server.")
  2444  		}
  2445  		if pp != sp {
  2446  			t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp)
  2447  		}
  2448  	}
  2449  }
  2450  
  2451  func (s) TestMetadataUnaryRPC(t *testing.T) {
  2452  	for _, e := range listTestEnv() {
  2453  		testMetadataUnaryRPC(t, e)
  2454  	}
  2455  }
  2456  
  2457  func testMetadataUnaryRPC(t *testing.T, e env) {
  2458  	te := newTest(t, e)
  2459  	te.startServer(&testServer{security: e.security})
  2460  	defer te.tearDown()
  2461  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2462  
  2463  	const argSize = 2718
  2464  	const respSize = 314
  2465  
  2466  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2467  	if err != nil {
  2468  		t.Fatal(err)
  2469  	}
  2470  
  2471  	req := &testpb.SimpleRequest{
  2472  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2473  		ResponseSize: respSize,
  2474  		Payload:      payload,
  2475  	}
  2476  	var header, trailer metadata.MD
  2477  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2478  	defer cancel()
  2479  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2480  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil {
  2481  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2482  	}
  2483  	// Ignore optional response headers that Servers may set:
  2484  	if header != nil {
  2485  		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
  2486  		delete(header, "date")    // the Date header is also optional
  2487  		delete(header, "user-agent")
  2488  		delete(header, "content-type")
  2489  		delete(header, "grpc-accept-encoding")
  2490  	}
  2491  	if !reflect.DeepEqual(header, testMetadata) {
  2492  		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
  2493  	}
  2494  	if !reflect.DeepEqual(trailer, testTrailerMetadata) {
  2495  		t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata)
  2496  	}
  2497  }
  2498  
  2499  func (s) TestMetadataOrderUnaryRPC(t *testing.T) {
  2500  	for _, e := range listTestEnv() {
  2501  		testMetadataOrderUnaryRPC(t, e)
  2502  	}
  2503  }
  2504  
  2505  func testMetadataOrderUnaryRPC(t *testing.T, e env) {
  2506  	te := newTest(t, e)
  2507  	te.startServer(&testServer{security: e.security})
  2508  	defer te.tearDown()
  2509  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2510  
  2511  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2512  	defer cancel()
  2513  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2514  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2")
  2515  	ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3")
  2516  
  2517  	// using Join to built expected metadata instead of FromOutgoingContext
  2518  	newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3"))
  2519  
  2520  	var header metadata.MD
  2521  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil {
  2522  		t.Fatal(err)
  2523  	}
  2524  
  2525  	// Ignore optional response headers that Servers may set:
  2526  	if header != nil {
  2527  		delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers
  2528  		delete(header, "date")    // the Date header is also optional
  2529  		delete(header, "user-agent")
  2530  		delete(header, "content-type")
  2531  		delete(header, "grpc-accept-encoding")
  2532  	}
  2533  
  2534  	if !reflect.DeepEqual(header, newMetadata) {
  2535  		t.Fatalf("Received header metadata %v, want %v", header, newMetadata)
  2536  	}
  2537  }
  2538  
  2539  func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) {
  2540  	for _, e := range listTestEnv() {
  2541  		testMultipleSetTrailerUnaryRPC(t, e)
  2542  	}
  2543  }
  2544  
  2545  func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) {
  2546  	te := newTest(t, e)
  2547  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  2548  	defer te.tearDown()
  2549  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2550  
  2551  	const (
  2552  		argSize  = 1
  2553  		respSize = 1
  2554  	)
  2555  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2556  	if err != nil {
  2557  		t.Fatal(err)
  2558  	}
  2559  
  2560  	req := &testpb.SimpleRequest{
  2561  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2562  		ResponseSize: respSize,
  2563  		Payload:      payload,
  2564  	}
  2565  	var trailer metadata.MD
  2566  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2567  	defer cancel()
  2568  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2569  	if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil {
  2570  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2571  	}
  2572  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  2573  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  2574  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  2575  	}
  2576  }
  2577  
  2578  func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) {
  2579  	for _, e := range listTestEnv() {
  2580  		testMultipleSetTrailerStreamingRPC(t, e)
  2581  	}
  2582  }
  2583  
  2584  func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) {
  2585  	te := newTest(t, e)
  2586  	te.startServer(&testServer{security: e.security, multipleSetTrailer: true})
  2587  	defer te.tearDown()
  2588  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2589  
  2590  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2591  	defer cancel()
  2592  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2593  	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
  2594  	if err != nil {
  2595  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2596  	}
  2597  	if err := stream.CloseSend(); err != nil {
  2598  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2599  	}
  2600  	if _, err := stream.Recv(); err != io.EOF {
  2601  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  2602  	}
  2603  
  2604  	trailer := stream.Trailer()
  2605  	expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2)
  2606  	if !reflect.DeepEqual(trailer, expectedTrailer) {
  2607  		t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer)
  2608  	}
  2609  }
  2610  
  2611  func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) {
  2612  	for _, e := range listTestEnv() {
  2613  		if e.name == "handler-tls" {
  2614  			continue
  2615  		}
  2616  		testSetAndSendHeaderUnaryRPC(t, e)
  2617  	}
  2618  }
  2619  
  2620  // To test header metadata is sent on SendHeader().
  2621  func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) {
  2622  	te := newTest(t, e)
  2623  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  2624  	defer te.tearDown()
  2625  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2626  
  2627  	const (
  2628  		argSize  = 1
  2629  		respSize = 1
  2630  	)
  2631  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2632  	if err != nil {
  2633  		t.Fatal(err)
  2634  	}
  2635  
  2636  	req := &testpb.SimpleRequest{
  2637  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2638  		ResponseSize: respSize,
  2639  		Payload:      payload,
  2640  	}
  2641  	var header metadata.MD
  2642  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2643  	defer cancel()
  2644  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2645  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  2646  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2647  	}
  2648  	delete(header, "user-agent")
  2649  	delete(header, "content-type")
  2650  	delete(header, "grpc-accept-encoding")
  2651  
  2652  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2653  	if !reflect.DeepEqual(header, expectedHeader) {
  2654  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2655  	}
  2656  }
  2657  
  2658  func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) {
  2659  	for _, e := range listTestEnv() {
  2660  		if e.name == "handler-tls" {
  2661  			continue
  2662  		}
  2663  		testMultipleSetHeaderUnaryRPC(t, e)
  2664  	}
  2665  }
  2666  
  2667  // To test header metadata is sent when sending response.
  2668  func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) {
  2669  	te := newTest(t, e)
  2670  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2671  	defer te.tearDown()
  2672  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2673  
  2674  	const (
  2675  		argSize  = 1
  2676  		respSize = 1
  2677  	)
  2678  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2679  	if err != nil {
  2680  		t.Fatal(err)
  2681  	}
  2682  
  2683  	req := &testpb.SimpleRequest{
  2684  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2685  		ResponseSize: respSize,
  2686  		Payload:      payload,
  2687  	}
  2688  
  2689  	var header metadata.MD
  2690  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2691  	defer cancel()
  2692  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2693  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil {
  2694  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
  2695  	}
  2696  	delete(header, "user-agent")
  2697  	delete(header, "content-type")
  2698  	delete(header, "grpc-accept-encoding")
  2699  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2700  	if !reflect.DeepEqual(header, expectedHeader) {
  2701  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2702  	}
  2703  }
  2704  
  2705  func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) {
  2706  	for _, e := range listTestEnv() {
  2707  		if e.name == "handler-tls" {
  2708  			continue
  2709  		}
  2710  		testMultipleSetHeaderUnaryRPCError(t, e)
  2711  	}
  2712  }
  2713  
  2714  // To test header metadata is sent when sending status.
  2715  func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) {
  2716  	te := newTest(t, e)
  2717  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2718  	defer te.tearDown()
  2719  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2720  
  2721  	const (
  2722  		argSize  = 1
  2723  		respSize = -1 // Invalid respSize to make RPC fail.
  2724  	)
  2725  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2726  	if err != nil {
  2727  		t.Fatal(err)
  2728  	}
  2729  
  2730  	req := &testpb.SimpleRequest{
  2731  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2732  		ResponseSize: respSize,
  2733  		Payload:      payload,
  2734  	}
  2735  	var header metadata.MD
  2736  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2737  	defer cancel()
  2738  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2739  	if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil {
  2740  		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <non-nil>", ctx, err)
  2741  	}
  2742  	delete(header, "user-agent")
  2743  	delete(header, "content-type")
  2744  	delete(header, "grpc-accept-encoding")
  2745  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2746  	if !reflect.DeepEqual(header, expectedHeader) {
  2747  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2748  	}
  2749  }
  2750  
  2751  func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) {
  2752  	for _, e := range listTestEnv() {
  2753  		if e.name == "handler-tls" {
  2754  			continue
  2755  		}
  2756  		testSetAndSendHeaderStreamingRPC(t, e)
  2757  	}
  2758  }
  2759  
  2760  // To test header metadata is sent on SendHeader().
  2761  func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) {
  2762  	te := newTest(t, e)
  2763  	te.startServer(&testServer{security: e.security, setAndSendHeader: true})
  2764  	defer te.tearDown()
  2765  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2766  
  2767  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2768  	defer cancel()
  2769  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2770  	stream, err := tc.FullDuplexCall(ctx)
  2771  	if err != nil {
  2772  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2773  	}
  2774  	if err := stream.CloseSend(); err != nil {
  2775  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2776  	}
  2777  	if _, err := stream.Recv(); err != io.EOF {
  2778  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  2779  	}
  2780  
  2781  	header, err := stream.Header()
  2782  	if err != nil {
  2783  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  2784  	}
  2785  	delete(header, "user-agent")
  2786  	delete(header, "content-type")
  2787  	delete(header, "grpc-accept-encoding")
  2788  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2789  	if !reflect.DeepEqual(header, expectedHeader) {
  2790  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2791  	}
  2792  }
  2793  
  2794  func (s) TestMultipleSetHeaderStreamingRPC(t *testing.T) {
  2795  	for _, e := range listTestEnv() {
  2796  		if e.name == "handler-tls" {
  2797  			continue
  2798  		}
  2799  		testMultipleSetHeaderStreamingRPC(t, e)
  2800  	}
  2801  }
  2802  
  2803  // To test header metadata is sent when sending response.
  2804  func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) {
  2805  	te := newTest(t, e)
  2806  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2807  	defer te.tearDown()
  2808  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2809  
  2810  	const (
  2811  		argSize  = 1
  2812  		respSize = 1
  2813  	)
  2814  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2815  	defer cancel()
  2816  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2817  	stream, err := tc.FullDuplexCall(ctx)
  2818  	if err != nil {
  2819  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2820  	}
  2821  
  2822  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2823  	if err != nil {
  2824  		t.Fatal(err)
  2825  	}
  2826  
  2827  	req := &testpb.StreamingOutputCallRequest{
  2828  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2829  		ResponseParameters: []*testpb.ResponseParameters{
  2830  			{Size: respSize},
  2831  		},
  2832  		Payload: payload,
  2833  	}
  2834  	if err := stream.Send(req); err != nil {
  2835  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  2836  	}
  2837  	if _, err := stream.Recv(); err != nil {
  2838  		t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  2839  	}
  2840  	if err := stream.CloseSend(); err != nil {
  2841  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2842  	}
  2843  	if _, err := stream.Recv(); err != io.EOF {
  2844  		t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err)
  2845  	}
  2846  
  2847  	header, err := stream.Header()
  2848  	if err != nil {
  2849  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  2850  	}
  2851  	delete(header, "user-agent")
  2852  	delete(header, "content-type")
  2853  	delete(header, "grpc-accept-encoding")
  2854  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2855  	if !reflect.DeepEqual(header, expectedHeader) {
  2856  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2857  	}
  2858  
  2859  }
  2860  
  2861  func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) {
  2862  	for _, e := range listTestEnv() {
  2863  		if e.name == "handler-tls" {
  2864  			continue
  2865  		}
  2866  		testMultipleSetHeaderStreamingRPCError(t, e)
  2867  	}
  2868  }
  2869  
  2870  // To test header metadata is sent when sending status.
  2871  func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
  2872  	te := newTest(t, e)
  2873  	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
  2874  	defer te.tearDown()
  2875  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2876  
  2877  	const (
  2878  		argSize  = 1
  2879  		respSize = -1
  2880  	)
  2881  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2882  	defer cancel()
  2883  	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  2884  	stream, err := tc.FullDuplexCall(ctx)
  2885  	if err != nil {
  2886  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  2887  	}
  2888  
  2889  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  2890  	if err != nil {
  2891  		t.Fatal(err)
  2892  	}
  2893  
  2894  	req := &testpb.StreamingOutputCallRequest{
  2895  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2896  		ResponseParameters: []*testpb.ResponseParameters{
  2897  			{Size: respSize},
  2898  		},
  2899  		Payload: payload,
  2900  	}
  2901  	if err := stream.Send(req); err != nil {
  2902  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  2903  	}
  2904  	if _, err := stream.Recv(); err == nil {
  2905  		t.Fatalf("%v.Recv() = %v, want <non-nil>", stream, err)
  2906  	}
  2907  
  2908  	header, err := stream.Header()
  2909  	if err != nil {
  2910  		t.Fatalf("%v.Header() = _, %v, want _, <nil>", stream, err)
  2911  	}
  2912  	delete(header, "user-agent")
  2913  	delete(header, "content-type")
  2914  	delete(header, "grpc-accept-encoding")
  2915  	expectedHeader := metadata.Join(testMetadata, testMetadata2)
  2916  	if !reflect.DeepEqual(header, expectedHeader) {
  2917  		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
  2918  	}
  2919  	if err := stream.CloseSend(); err != nil {
  2920  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  2921  	}
  2922  }
  2923  
  2924  // TestMalformedHTTP2Metadata verifies the returned error when the client
  2925  // sends an illegal metadata.
  2926  func (s) TestMalformedHTTP2Metadata(t *testing.T) {
  2927  	for _, e := range listTestEnv() {
  2928  		if e.name == "handler-tls" {
  2929  			// Failed with "server stops accepting new RPCs".
  2930  			// Server stops accepting new RPCs when the client sends an illegal http2 header.
  2931  			continue
  2932  		}
  2933  		testMalformedHTTP2Metadata(t, e)
  2934  	}
  2935  }
  2936  
  2937  func testMalformedHTTP2Metadata(t *testing.T, e env) {
  2938  	te := newTest(t, e)
  2939  	te.startServer(&testServer{security: e.security})
  2940  	defer te.tearDown()
  2941  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  2942  
  2943  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
  2944  	if err != nil {
  2945  		t.Fatal(err)
  2946  	}
  2947  
  2948  	req := &testpb.SimpleRequest{
  2949  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  2950  		ResponseSize: 314,
  2951  		Payload:      payload,
  2952  	}
  2953  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2954  	defer cancel()
  2955  	ctx = metadata.NewOutgoingContext(ctx, malformedHTTP2Metadata)
  2956  	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
  2957  		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
  2958  	}
  2959  }
  2960  
  2961  // Tests that the client transparently retries correctly when receiving a
  2962  // RST_STREAM with code REFUSED_STREAM.
  2963  func (s) TestTransparentRetry(t *testing.T) {
  2964  	testCases := []struct {
  2965  		failFast bool
  2966  		errCode  codes.Code
  2967  	}{{
  2968  		// success attempt: 1, (stream ID 1)
  2969  	}, {
  2970  		// success attempt: 2, (stream IDs 3, 5)
  2971  	}, {
  2972  		// no success attempt (stream IDs 7, 9)
  2973  		errCode: codes.Unavailable,
  2974  	}, {
  2975  		// success attempt: 1 (stream ID 11),
  2976  		failFast: true,
  2977  	}, {
  2978  		// success attempt: 2 (stream IDs 13, 15),
  2979  		failFast: true,
  2980  	}, {
  2981  		// no success attempt (stream IDs 17, 19)
  2982  		failFast: true,
  2983  		errCode:  codes.Unavailable,
  2984  	}}
  2985  
  2986  	lis, err := net.Listen("tcp", "localhost:0")
  2987  	if err != nil {
  2988  		t.Fatalf("Failed to listen. Err: %v", err)
  2989  	}
  2990  	defer lis.Close()
  2991  	server := &httpServer{
  2992  		responses: []httpServerResponse{{
  2993  			trailers: [][]string{{
  2994  				":status", "200",
  2995  				"content-type", "application/grpc",
  2996  				"grpc-status", "0",
  2997  			}},
  2998  		}},
  2999  		refuseStream: func(i uint32) bool {
  3000  			switch i {
  3001  			case 1, 5, 11, 15: // these stream IDs succeed
  3002  				return false
  3003  			}
  3004  			return true // these are refused
  3005  		},
  3006  	}
  3007  	server.start(t, lis)
  3008  	cc, err := grpc.NewClient(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
  3009  	if err != nil {
  3010  		t.Fatalf("failed to create a client for the server: %v", err)
  3011  	}
  3012  	defer cc.Close()
  3013  
  3014  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3015  	defer cancel()
  3016  
  3017  	client := testgrpc.NewTestServiceClient(cc)
  3018  
  3019  	for i, tc := range testCases {
  3020  		stream, err := client.FullDuplexCall(ctx)
  3021  		if err != nil {
  3022  			t.Fatalf("error creating stream due to err: %v", err)
  3023  		}
  3024  		code := func(err error) codes.Code {
  3025  			if err == io.EOF {
  3026  				return codes.OK
  3027  			}
  3028  			return status.Code(err)
  3029  		}
  3030  		if _, err := stream.Recv(); code(err) != tc.errCode {
  3031  			t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode)
  3032  		}
  3033  
  3034  	}
  3035  }
  3036  
  3037  func (s) TestCancel(t *testing.T) {
  3038  	for _, e := range listTestEnv() {
  3039  		t.Run(e.name, func(t *testing.T) {
  3040  			testCancel(t, e)
  3041  		})
  3042  	}
  3043  }
  3044  
  3045  func testCancel(t *testing.T, e env) {
  3046  	te := newTest(t, e)
  3047  	te.declareLogNoise("grpc: the client connection is closing; please retry")
  3048  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second})
  3049  	defer te.tearDown()
  3050  
  3051  	cc := te.clientConn()
  3052  	tc := testgrpc.NewTestServiceClient(cc)
  3053  
  3054  	const argSize = 2718
  3055  	const respSize = 314
  3056  
  3057  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  3058  	if err != nil {
  3059  		t.Fatal(err)
  3060  	}
  3061  
  3062  	req := &testpb.SimpleRequest{
  3063  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  3064  		ResponseSize: respSize,
  3065  		Payload:      payload,
  3066  	}
  3067  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3068  	time.AfterFunc(1*time.Millisecond, cancel)
  3069  	if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled {
  3070  		t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, err, codes.Canceled)
  3071  	}
  3072  	awaitNewConnLogOutput()
  3073  }
  3074  
  3075  func (s) TestCancelNoIO(t *testing.T) {
  3076  	for _, e := range listTestEnv() {
  3077  		testCancelNoIO(t, e)
  3078  	}
  3079  }
  3080  
// testCancelNoIO verifies that canceling a stream that performed no I/O
// releases the server's MaxConcurrentStreams quota: with the limit set to 1,
// a second stream blocked on the quota is admitted once the first (idle)
// stream is canceled.
func testCancelNoIO(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken")
	te.maxStream = 1 // Only allows 1 live stream per server transport.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)

	// Start one blocked RPC for which we'll never send streaming
	// input. This will consume the 1 maximum concurrent streams,
	// causing future RPCs to hang.
	ctx, cancelFirst := context.WithTimeout(context.Background(), defaultTestTimeout)
	_, err := tc.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}

	// Loop until the ClientConn receives the initial settings
	// frame from the server, notifying it about the maximum
	// concurrent streams. We know when it's received it because
	// an RPC will fail with codes.DeadlineExceeded instead of
	// succeeding.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	for {
		ctx, cancelSecond := context.WithTimeout(context.Background(), defaultTestShortTimeout)
		_, err := tc.StreamingInputCall(ctx)
		cancelSecond()
		if err == nil {
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
	}
	// If there are any RPCs in flight before the client receives
	// the max streams setting, let them be expired.
	// TODO(bradfitz): add internal test hook for this (Issue 534)
	time.Sleep(50 * time.Millisecond)

	// Cancel the first (quota-holding) RPC shortly after the final RPC below
	// starts waiting for stream quota.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancelFirst()
	}()

	// This should be blocked until the 1st is canceled, then succeed.
	ctx, cancelThird := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
	}
	cancelThird()
}
  3135  
// The following tests the gRPC streaming RPC implementations.
// TODO(zhaoq): Have better coverage on error cases.
// reqSizes and respSizes drive the ping-pong-style streaming tests below:
// entry i pairs a request payload of reqSizes[i] bytes with an expected
// response of respSizes[i] bytes.
var (
	reqSizes  = []int{27182, 8, 1828, 45904}
	respSizes = []int{31415, 9, 2653, 58979}
)
  3142  
  3143  func (s) TestNoService(t *testing.T) {
  3144  	for _, e := range listTestEnv() {
  3145  		testNoService(t, e)
  3146  	}
  3147  }
  3148  
  3149  func testNoService(t *testing.T, e env) {
  3150  	te := newTest(t, e)
  3151  	te.startServer(nil)
  3152  	defer te.tearDown()
  3153  
  3154  	cc := te.clientConn()
  3155  	tc := testgrpc.NewTestServiceClient(cc)
  3156  
  3157  	stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true))
  3158  	if err != nil {
  3159  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3160  	}
  3161  	if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented {
  3162  		t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented)
  3163  	}
  3164  }
  3165  
  3166  func (s) TestPingPong(t *testing.T) {
  3167  	for _, e := range listTestEnv() {
  3168  		testPingPong(t, e)
  3169  	}
  3170  }
  3171  
  3172  func testPingPong(t *testing.T, e env) {
  3173  	te := newTest(t, e)
  3174  	te.startServer(&testServer{security: e.security})
  3175  	defer te.tearDown()
  3176  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3177  
  3178  	stream, err := tc.FullDuplexCall(te.ctx)
  3179  	if err != nil {
  3180  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  3181  	}
  3182  	var index int
  3183  	for index < len(reqSizes) {
  3184  		respParam := []*testpb.ResponseParameters{
  3185  			{
  3186  				Size: int32(respSizes[index]),
  3187  			},
  3188  		}
  3189  
  3190  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
  3191  		if err != nil {
  3192  			t.Fatal(err)
  3193  		}
  3194  
  3195  		req := &testpb.StreamingOutputCallRequest{
  3196  			ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3197  			ResponseParameters: respParam,
  3198  			Payload:            payload,
  3199  		}
  3200  		if err := stream.Send(req); err != nil {
  3201  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3202  		}
  3203  		reply, err := stream.Recv()
  3204  		if err != nil {
  3205  			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
  3206  		}
  3207  		pt := reply.GetPayload().GetType()
  3208  		if pt != testpb.PayloadType_COMPRESSABLE {
  3209  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  3210  		}
  3211  		size := len(reply.GetPayload().GetBody())
  3212  		if size != int(respSizes[index]) {
  3213  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  3214  		}
  3215  		index++
  3216  	}
  3217  	if err := stream.CloseSend(); err != nil {
  3218  		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
  3219  	}
  3220  	if _, err := stream.Recv(); err != io.EOF {
  3221  		t.Fatalf("%v failed to complele the ping pong test: %v", stream, err)
  3222  	}
  3223  }
  3224  
  3225  func (s) TestMetadataStreamingRPC(t *testing.T) {
  3226  	for _, e := range listTestEnv() {
  3227  		testMetadataStreamingRPC(t, e)
  3228  	}
  3229  }
  3230  
// testMetadataStreamingRPC verifies that, on a streaming RPC, metadata from
// the outgoing context is echoed in the stream's header (checked twice to
// also exercise the cached value) and the server's trailer metadata is
// delivered after the stream completes.
func testMetadataStreamingRPC(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testgrpc.NewTestServiceClient(te.clientConn())

	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	// The sender goroutine runs concurrently with the Recv loop below.
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		delete(headerMD, "grpc-accept-encoding")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// test the cached value.
		headerMD, err = stream.Header()
		delete(headerMD, "trailer") // ignore if present
		delete(headerMD, "user-agent")
		delete(headerMD, "content-type")
		delete(headerMD, "grpc-accept-encoding")
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Send one request per reqSizes entry; any failure is reported after
		// CloseSend so the stream is always shut down.
		err = func() error {
			for index := 0; index < len(reqSizes); index++ {
				respParam := []*testpb.ResponseParameters{
					{
						Size: int32(respSizes[index]),
					},
				}

				payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index]))
				if err != nil {
					return err
				}

				req := &testpb.StreamingOutputCallRequest{
					ResponseType:       testpb.PayloadType_COMPRESSABLE,
					ResponseParameters: respParam,
					Payload:            payload,
				}
				if err := stream.Send(req); err != nil {
					return fmt.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				}
			}
			return nil
		}()
		// Tell the server we're done sending args.
		stream.CloseSend()
		if err != nil {
			t.Error(err)
		}
	}()
	// Drain all responses; the loop ends when the stream terminates.
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testTrailerMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata)
	}
}
  3303  
  3304  func (s) TestServerStreaming(t *testing.T) {
  3305  	for _, e := range listTestEnv() {
  3306  		testServerStreaming(t, e)
  3307  	}
  3308  }
  3309  
  3310  func testServerStreaming(t *testing.T, e env) {
  3311  	te := newTest(t, e)
  3312  	te.startServer(&testServer{security: e.security})
  3313  	defer te.tearDown()
  3314  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3315  
  3316  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  3317  	for i, s := range respSizes {
  3318  		respParam[i] = &testpb.ResponseParameters{
  3319  			Size: int32(s),
  3320  		}
  3321  	}
  3322  	req := &testpb.StreamingOutputCallRequest{
  3323  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3324  		ResponseParameters: respParam,
  3325  	}
  3326  
  3327  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3328  	defer cancel()
  3329  	stream, err := tc.StreamingOutputCall(ctx, req)
  3330  	if err != nil {
  3331  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  3332  	}
  3333  	var rpcStatus error
  3334  	var respCnt int
  3335  	var index int
  3336  	for {
  3337  		reply, err := stream.Recv()
  3338  		if err != nil {
  3339  			rpcStatus = err
  3340  			break
  3341  		}
  3342  		pt := reply.GetPayload().GetType()
  3343  		if pt != testpb.PayloadType_COMPRESSABLE {
  3344  			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
  3345  		}
  3346  		size := len(reply.GetPayload().GetBody())
  3347  		if size != int(respSizes[index]) {
  3348  			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
  3349  		}
  3350  		index++
  3351  		respCnt++
  3352  	}
  3353  	if rpcStatus != io.EOF {
  3354  		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
  3355  	}
  3356  	if respCnt != len(respSizes) {
  3357  		t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
  3358  	}
  3359  }
  3360  
  3361  func (s) TestFailedServerStreaming(t *testing.T) {
  3362  	for _, e := range listTestEnv() {
  3363  		testFailedServerStreaming(t, e)
  3364  	}
  3365  }
  3366  
  3367  func testFailedServerStreaming(t *testing.T, e env) {
  3368  	te := newTest(t, e)
  3369  	te.userAgent = failAppUA
  3370  	te.startServer(&testServer{security: e.security})
  3371  	defer te.tearDown()
  3372  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3373  
  3374  	respParam := make([]*testpb.ResponseParameters, len(respSizes))
  3375  	for i, s := range respSizes {
  3376  		respParam[i] = &testpb.ResponseParameters{
  3377  			Size: int32(s),
  3378  		}
  3379  	}
  3380  	req := &testpb.StreamingOutputCallRequest{
  3381  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3382  		ResponseParameters: respParam,
  3383  	}
  3384  	ctx := metadata.NewOutgoingContext(te.ctx, testMetadata)
  3385  	stream, err := tc.StreamingOutputCall(ctx, req)
  3386  	if err != nil {
  3387  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  3388  	}
  3389  	wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA)
  3390  	if _, err := stream.Recv(); !equalError(err, wantErr) {
  3391  		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr)
  3392  	}
  3393  }
  3394  
  3395  func equalError(x, y error) bool {
  3396  	return x == y || (x != nil && y != nil && x.Error() == y.Error())
  3397  }
  3398  
// concurrentSendServer is a TestServiceServer whose
// StreamingOutputCall makes ten serial Send calls, sending payloads
// "0".."9", inclusive.  TestServerStreamingConcurrent verifies they
// were received in the correct order, and that there were no races.
//
// All other TestServiceServer methods crash if called.
type concurrentSendServer struct {
	// Embedding the generated interface supplies stubs for every method
	// this type does not implement itself.
	testgrpc.TestServiceServer
}
  3408  
  3409  func (s concurrentSendServer) StreamingOutputCall(_ *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error {
  3410  	for i := 0; i < 10; i++ {
  3411  		stream.Send(&testpb.StreamingOutputCallResponse{
  3412  			Payload: &testpb.Payload{
  3413  				Body: []byte{'0' + uint8(i)},
  3414  			},
  3415  		})
  3416  	}
  3417  	return nil
  3418  }
  3419  
  3420  // Tests doing a bunch of concurrent streaming output calls.
  3421  func (s) TestServerStreamingConcurrent(t *testing.T) {
  3422  	for _, e := range listTestEnv() {
  3423  		testServerStreamingConcurrent(t, e)
  3424  	}
  3425  }
  3426  
  3427  func testServerStreamingConcurrent(t *testing.T, e env) {
  3428  	te := newTest(t, e)
  3429  	te.startServer(concurrentSendServer{})
  3430  	defer te.tearDown()
  3431  
  3432  	cc := te.clientConn()
  3433  	tc := testgrpc.NewTestServiceClient(cc)
  3434  
  3435  	doStreamingCall := func() {
  3436  		req := &testpb.StreamingOutputCallRequest{}
  3437  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3438  		defer cancel()
  3439  		stream, err := tc.StreamingOutputCall(ctx, req)
  3440  		if err != nil {
  3441  			t.Errorf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
  3442  			return
  3443  		}
  3444  		var ngot int
  3445  		var buf bytes.Buffer
  3446  		for {
  3447  			reply, err := stream.Recv()
  3448  			if err == io.EOF {
  3449  				break
  3450  			}
  3451  			if err != nil {
  3452  				t.Fatal(err)
  3453  			}
  3454  			ngot++
  3455  			if buf.Len() > 0 {
  3456  				buf.WriteByte(',')
  3457  			}
  3458  			buf.Write(reply.GetPayload().GetBody())
  3459  		}
  3460  		if want := 10; ngot != want {
  3461  			t.Errorf("Got %d replies, want %d", ngot, want)
  3462  		}
  3463  		if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
  3464  			t.Errorf("Got replies %q; want %q", got, want)
  3465  		}
  3466  	}
  3467  
  3468  	var wg sync.WaitGroup
  3469  	for i := 0; i < 20; i++ {
  3470  		wg.Add(1)
  3471  		go func() {
  3472  			defer wg.Done()
  3473  			doStreamingCall()
  3474  		}()
  3475  	}
  3476  	wg.Wait()
  3477  
  3478  }
  3479  
  3480  func generatePayloadSizes() [][]int {
  3481  	reqSizes := [][]int{
  3482  		{27182, 8, 1828, 45904},
  3483  	}
  3484  
  3485  	num8KPayloads := 1024
  3486  	eightKPayloads := []int{}
  3487  	for i := 0; i < num8KPayloads; i++ {
  3488  		eightKPayloads = append(eightKPayloads, (1 << 13))
  3489  	}
  3490  	reqSizes = append(reqSizes, eightKPayloads)
  3491  
  3492  	num2MPayloads := 8
  3493  	twoMPayloads := []int{}
  3494  	for i := 0; i < num2MPayloads; i++ {
  3495  		twoMPayloads = append(twoMPayloads, (1 << 21))
  3496  	}
  3497  	reqSizes = append(reqSizes, twoMPayloads)
  3498  
  3499  	return reqSizes
  3500  }
  3501  
  3502  func (s) TestClientStreaming(t *testing.T) {
  3503  	for _, s := range generatePayloadSizes() {
  3504  		for _, e := range listTestEnv() {
  3505  			testClientStreaming(t, e, s)
  3506  		}
  3507  	}
  3508  }
  3509  
  3510  func testClientStreaming(t *testing.T, e env, sizes []int) {
  3511  	te := newTest(t, e)
  3512  	te.startServer(&testServer{security: e.security})
  3513  	defer te.tearDown()
  3514  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3515  
  3516  	ctx, cancel := context.WithTimeout(te.ctx, defaultTestTimeout)
  3517  	defer cancel()
  3518  	stream, err := tc.StreamingInputCall(ctx)
  3519  	if err != nil {
  3520  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
  3521  	}
  3522  
  3523  	var sum int
  3524  	for _, s := range sizes {
  3525  		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
  3526  		if err != nil {
  3527  			t.Fatal(err)
  3528  		}
  3529  
  3530  		req := &testpb.StreamingInputCallRequest{
  3531  			Payload: payload,
  3532  		}
  3533  		if err := stream.Send(req); err != nil {
  3534  			t.Fatalf("%v.Send(_) = %v, want <nil>", stream, err)
  3535  		}
  3536  		sum += s
  3537  	}
  3538  	reply, err := stream.CloseAndRecv()
  3539  	if err != nil {
  3540  		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
  3541  	}
  3542  	if reply.GetAggregatedPayloadSize() != int32(sum) {
  3543  		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
  3544  	}
  3545  }
  3546  
  3547  func (s) TestClientStreamingError(t *testing.T) {
  3548  	for _, e := range listTestEnv() {
  3549  		if e.name == "handler-tls" {
  3550  			continue
  3551  		}
  3552  		testClientStreamingError(t, e)
  3553  	}
  3554  }
  3555  
  3556  func testClientStreamingError(t *testing.T, e env) {
  3557  	te := newTest(t, e)
  3558  	te.startServer(&testServer{security: e.security, earlyFail: true})
  3559  	defer te.tearDown()
  3560  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3561  
  3562  	stream, err := tc.StreamingInputCall(te.ctx)
  3563  	if err != nil {
  3564  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
  3565  	}
  3566  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
  3567  	if err != nil {
  3568  		t.Fatal(err)
  3569  	}
  3570  
  3571  	req := &testpb.StreamingInputCallRequest{
  3572  		Payload: payload,
  3573  	}
  3574  	// The 1st request should go through.
  3575  	if err := stream.Send(req); err != nil {
  3576  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  3577  	}
  3578  	for {
  3579  		if err := stream.Send(req); err != io.EOF {
  3580  			continue
  3581  		}
  3582  		if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound {
  3583  			t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound)
  3584  		}
  3585  		break
  3586  	}
  3587  }
  3588  
  3589  // Tests that a client receives a cardinality violation error for client-streaming
  3590  // RPCs if the server doesn't send a message before returning status OK.
  3591  func (s) TestClientStreamingCardinalityViolation_ServerHandlerMissingSendAndClose(t *testing.T) {
  3592  	// TODO : https://github.com/grpc/grpc-go/issues/8119 - remove `t.Skip()`
  3593  	// after this is fixed.
  3594  	t.Skip()
  3595  	ss := &stubserver.StubServer{
  3596  		StreamingInputCallF: func(_ testgrpc.TestService_StreamingInputCallServer) error {
  3597  			// Returning status OK without sending a response message.This is a
  3598  			// cardinality violation.
  3599  			return nil
  3600  		},
  3601  	}
  3602  	if err := ss.Start(nil); err != nil {
  3603  		t.Fatalf("Error starting endpoint server: %v", err)
  3604  	}
  3605  	defer ss.Stop()
  3606  
  3607  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3608  	defer cancel()
  3609  
  3610  	stream, err := ss.Client.StreamingInputCall(ctx)
  3611  	if err != nil {
  3612  		t.Fatalf(".StreamingInputCall(_) = _, %v, want <nil>", err)
  3613  	}
  3614  
  3615  	_, err = stream.CloseAndRecv()
  3616  	if err == nil {
  3617  		t.Fatalf("stream.CloseAndRecv() = %v, want an error", err)
  3618  	}
  3619  	if status.Code(err) != codes.Internal {
  3620  		t.Fatalf("stream.CloseAndRecv() = %v, want error %s", err, codes.Internal)
  3621  	}
  3622  }
  3623  
  3624  // Tests that the server can continue to receive messages after calling SendAndClose. Although
  3625  // this is unexpected, we retain it for backward compatibility.
  3626  func (s) TestClientStreaming_ServerHandlerRecvAfterSendAndClose(t *testing.T) {
  3627  	ss := stubserver.StubServer{
  3628  		StreamingInputCallF: func(stream testgrpc.TestService_StreamingInputCallServer) error {
  3629  			if err := stream.SendAndClose(&testpb.StreamingInputCallResponse{}); err != nil {
  3630  				t.Errorf("stream.SendAndClose(_) = %v, want <nil>", err)
  3631  			}
  3632  			if resp, err := stream.Recv(); err != nil || !proto.Equal(resp, &testpb.StreamingInputCallRequest{}) {
  3633  				t.Errorf("stream.Recv() = %s, %v, want non-nil empty response, <nil>", resp, err)
  3634  			}
  3635  			return nil
  3636  		},
  3637  	}
  3638  	if err := ss.Start(nil); err != nil {
  3639  		t.Fatal("Error starting server:", err)
  3640  	}
  3641  	defer ss.Stop()
  3642  
  3643  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3644  	defer cancel()
  3645  	stream, err := ss.Client.StreamingInputCall(ctx)
  3646  	if err != nil {
  3647  		t.Fatalf(".StreamingInputCall(_) = _, %v, want <nil>", err)
  3648  	}
  3649  	if err := stream.Send(&testpb.StreamingInputCallRequest{}); err != nil {
  3650  		t.Fatalf("stream.Send(_) = %v, want <nil>", err)
  3651  	}
  3652  	if resp, err := stream.CloseAndRecv(); err != nil || !proto.Equal(resp, &testpb.StreamingInputCallResponse{}) {
  3653  		t.Fatalf("stream.CloseSend() = %v , %v, want non-nil empty response, <nil>", resp, err)
  3654  	}
  3655  }
  3656  
  3657  // Tests that Recv() on client streaming client blocks till the server handler
  3658  // returns even after calling SendAndClose from the server handler.
  3659  func (s) TestClientStreaming_RecvWaitsForServerHandlerRetrun(t *testing.T) {
  3660  	waitForReturn := make(chan struct{})
  3661  	ss := stubserver.StubServer{
  3662  		StreamingInputCallF: func(stream testgrpc.TestService_StreamingInputCallServer) error {
  3663  			if err := stream.SendAndClose(&testpb.StreamingInputCallResponse{}); err != nil {
  3664  				t.Errorf("stream.SendAndClose(_) = %v, want <nil>", err)
  3665  			}
  3666  			<-waitForReturn
  3667  			return nil
  3668  		},
  3669  	}
  3670  	if err := ss.Start(nil); err != nil {
  3671  		t.Fatal("Error starting server:", err)
  3672  	}
  3673  	defer ss.Stop()
  3674  
  3675  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3676  	defer cancel()
  3677  	stream, err := ss.Client.StreamingInputCall(ctx)
  3678  	if err != nil {
  3679  		t.Fatalf(".StreamingInputCall(_) = _, %v, want <nil>", err)
  3680  	}
  3681  	// Start Recv in a goroutine to test if it blocks until the server handler returns.
  3682  	errCh := make(chan error, 1)
  3683  	go func() {
  3684  		resp := new(testpb.StreamingInputCallResponse)
  3685  		err := stream.RecvMsg(resp)
  3686  		errCh <- err
  3687  	}()
  3688  
  3689  	// Check that Recv() is blocked.
  3690  	select {
  3691  	case err := <-errCh:
  3692  		t.Fatalf("stream.RecvMsg(_) = %v returned unexpectedly", err)
  3693  	case <-time.After(defaultTestShortTimeout):
  3694  	}
  3695  
  3696  	close(waitForReturn)
  3697  
  3698  	// Recv() should return after the server handler returns.
  3699  	select {
  3700  	case err := <-errCh:
  3701  		if err != nil {
  3702  			t.Fatalf("stream.RecvMsg(_) = %v, want <nil>", err)
  3703  		}
  3704  	case <-time.After(defaultTestTimeout):
  3705  		t.Fatal("Timed out waiting for stream.RecvMsg(_) to return")
  3706  	}
  3707  }
  3708  
  3709  // Tests the behavior where server handler returns an error after calling
  3710  // SendAndClose. It verifies the that client receives nil message and
  3711  // non-nil error.
  3712  func (s) TestClientStreaming_ReturnErrorAfterSendAndClose(t *testing.T) {
  3713  	wantError := status.Error(codes.Internal, "error for testing")
  3714  	ss := stubserver.StubServer{
  3715  		StreamingInputCallF: func(stream testgrpc.TestService_StreamingInputCallServer) error {
  3716  			if err := stream.SendAndClose(&testpb.StreamingInputCallResponse{}); err != nil {
  3717  				t.Errorf("stream.SendAndClose(_) = %v, want <nil>", err)
  3718  			}
  3719  			return wantError
  3720  		},
  3721  	}
  3722  	if err := ss.Start(nil); err != nil {
  3723  		t.Fatal("Error starting server:", err)
  3724  	}
  3725  	defer ss.Stop()
  3726  
  3727  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3728  	defer cancel()
  3729  	stream, err := ss.Client.StreamingInputCall(ctx)
  3730  	if err != nil {
  3731  		t.Fatalf(".StreamingInputCall(_) = _, %v, want <nil>", err)
  3732  	}
  3733  	resp, err := stream.CloseAndRecv()
  3734  
  3735  	wantStatus, _ := status.FromError(wantError)
  3736  	gotStatus, _ := status.FromError(err)
  3737  
  3738  	if gotStatus.Code() != wantStatus.Code() || gotStatus.Message() != wantStatus.Message() || resp != nil {
  3739  		t.Fatalf("stream.CloseSend() = %v , %v, want <nil>, %s", resp, err, wantError)
  3740  	}
  3741  }
  3742  
// Tests that a client receives a cardinality violation error for client-streaming
// RPCs if the server calls SendMsg multiple times.
func (s) TestClientStreaming_ServerHandlerSendMsgAfterSendMsg(t *testing.T) {
	ss := stubserver.StubServer{
		StreamingInputCallF: func(stream testgrpc.TestService_StreamingInputCallServer) error {
			// Both SendMsg calls are expected to succeed on the server
			// side; the violation is asserted client-side below as an
			// Internal status.
			if err := stream.SendMsg(&testpb.StreamingInputCallResponse{}); err != nil {
				t.Errorf("stream.SendMsg(_) = %v, want <nil>", err)
			}
			if err := stream.SendMsg(&testpb.StreamingInputCallResponse{}); err != nil {
				t.Errorf("stream.SendMsg(_) = %v, want <nil>", err)
			}
			return nil
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatal("Error starting server:", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := ss.Client.StreamingInputCall(ctx)
	if err != nil {
		t.Fatalf(".StreamingInputCall(_) = _, %v, want <nil>", err)
	}
	if err := stream.Send(&testpb.StreamingInputCallRequest{}); err != nil {
		t.Fatalf("stream.Send(_) = %v, want <nil>", err)
	}
	if _, err := stream.CloseAndRecv(); status.Code(err) != codes.Internal {
		t.Fatalf("stream.CloseAndRecv() = %v, want error with status code %s", err, codes.Internal)
	}
}
  3775  
  3776  func (s) TestExceedMaxStreamsLimit(t *testing.T) {
  3777  	for _, e := range listTestEnv() {
  3778  		testExceedMaxStreamsLimit(t, e)
  3779  	}
  3780  }
  3781  
  3782  func testExceedMaxStreamsLimit(t *testing.T, e env) {
  3783  	te := newTest(t, e)
  3784  	te.declareLogNoise(
  3785  		"http2Client.notifyError got notified that the client transport was broken",
  3786  		"Conn.resetTransport failed to create client transport",
  3787  		"grpc: the connection is closing",
  3788  	)
  3789  	te.maxStream = 1 // Only allows 1 live stream per server transport.
  3790  	te.startServer(&testServer{security: e.security})
  3791  	defer te.tearDown()
  3792  
  3793  	cc := te.clientConn()
  3794  	tc := testgrpc.NewTestServiceClient(cc)
  3795  
  3796  	_, err := tc.StreamingInputCall(te.ctx)
  3797  	if err != nil {
  3798  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, <nil>", tc, err)
  3799  	}
  3800  	// Loop until receiving the new max stream setting from the server.
  3801  	for {
  3802  		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
  3803  		defer cancel()
  3804  		_, err := tc.StreamingInputCall(ctx)
  3805  		if err == nil {
  3806  			time.Sleep(50 * time.Millisecond)
  3807  			continue
  3808  		}
  3809  		if status.Code(err) == codes.DeadlineExceeded {
  3810  			break
  3811  		}
  3812  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded)
  3813  	}
  3814  }
  3815  
  3816  func (s) TestStreamsQuotaRecovery(t *testing.T) {
  3817  	for _, e := range listTestEnv() {
  3818  		testStreamsQuotaRecovery(t, e)
  3819  	}
  3820  }
  3821  
// testStreamsQuotaRecovery verifies stream-quota behavior with a server
// limit of one concurrent stream: a single live stream exhausts the
// quota, concurrent wait-for-ready unary calls then all time out, and
// canceling the quota-holding stream makes a new stream admissible.
func testStreamsQuotaRecovery(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise(
		"http2Client.notifyError got notified that the client transport was broken",
		"Conn.resetTransport failed to create client transport",
		"grpc: the connection is closing",
	)
	te.maxStream = 1 // Allows 1 live stream.
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// This stream occupies the single available slot until the explicit
	// cancel() call near the end of the test.
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, <nil>", err)
	}
	// Loop until the new max stream setting is effective.
	for {
		ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
		_, err := tc.StreamingInputCall(ctx)
		cancel()
		if err == nil {
			time.Sleep(5 * time.Millisecond)
			continue
		}
		if status.Code(err) == codes.DeadlineExceeded {
			break
		}
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded)
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314)
			if err != nil {
				t.Error(err)
				return
			}
			req := &testpb.SimpleRequest{
				ResponseType: testpb.PayloadType_COMPRESSABLE,
				ResponseSize: 1592,
				Payload:      payload,
			}
			// No rpc should go through due to the max streams limit.
			ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
			defer cancel()
			if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded {
				t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
			}
		}()
	}
	wg.Wait()

	// Cancel the context of the first stream, releasing the only slot.
	cancel()
	// A new stream should be allowed after canceling the first one.
	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.StreamingInputCall(ctx); err != nil {
		t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil)
	}
}
  3888  
  3889  func (s) TestUnaryClientInterceptor(t *testing.T) {
  3890  	for _, e := range listTestEnv() {
  3891  		testUnaryClientInterceptor(t, e)
  3892  	}
  3893  }
  3894  
  3895  func failOkayRPC(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
  3896  	err := invoker(ctx, method, req, reply, cc, opts...)
  3897  	if err == nil {
  3898  		return status.Error(codes.NotFound, "")
  3899  	}
  3900  	return err
  3901  }
  3902  
  3903  func testUnaryClientInterceptor(t *testing.T, e env) {
  3904  	te := newTest(t, e)
  3905  	te.userAgent = testAppUA
  3906  	te.unaryClientInt = failOkayRPC
  3907  	te.startServer(&testServer{security: e.security})
  3908  	defer te.tearDown()
  3909  
  3910  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3911  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3912  	defer cancel()
  3913  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound {
  3914  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  3915  	}
  3916  }
  3917  
  3918  func (s) TestStreamClientInterceptor(t *testing.T) {
  3919  	for _, e := range listTestEnv() {
  3920  		testStreamClientInterceptor(t, e)
  3921  	}
  3922  }
  3923  
  3924  func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
  3925  	s, err := streamer(ctx, desc, cc, method, opts...)
  3926  	if err == nil {
  3927  		return nil, status.Error(codes.NotFound, "")
  3928  	}
  3929  	return s, nil
  3930  }
  3931  
  3932  func testStreamClientInterceptor(t *testing.T, e env) {
  3933  	te := newTest(t, e)
  3934  	te.streamClientInt = failOkayStream
  3935  	te.startServer(&testServer{security: e.security})
  3936  	defer te.tearDown()
  3937  
  3938  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3939  	respParam := []*testpb.ResponseParameters{
  3940  		{
  3941  			Size: int32(1),
  3942  		},
  3943  	}
  3944  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  3945  	if err != nil {
  3946  		t.Fatal(err)
  3947  	}
  3948  	req := &testpb.StreamingOutputCallRequest{
  3949  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  3950  		ResponseParameters: respParam,
  3951  		Payload:            payload,
  3952  	}
  3953  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3954  	defer cancel()
  3955  	if _, err := tc.StreamingOutputCall(ctx, req); status.Code(err) != codes.NotFound {
  3956  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound)
  3957  	}
  3958  }
  3959  
  3960  func (s) TestUnaryServerInterceptor(t *testing.T) {
  3961  	for _, e := range listTestEnv() {
  3962  		testUnaryServerInterceptor(t, e)
  3963  	}
  3964  }
  3965  
// errInjector is a unary server interceptor that rejects every RPC with
// a PermissionDenied status without invoking the wrapped handler.
func errInjector(context.Context, any, *grpc.UnaryServerInfo, grpc.UnaryHandler) (any, error) {
	return nil, status.Error(codes.PermissionDenied, "")
}
  3969  
  3970  func testUnaryServerInterceptor(t *testing.T, e env) {
  3971  	te := newTest(t, e)
  3972  	te.unaryServerInt = errInjector
  3973  	te.startServer(&testServer{security: e.security})
  3974  	defer te.tearDown()
  3975  
  3976  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  3977  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  3978  	defer cancel()
  3979  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied {
  3980  		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  3981  	}
  3982  }
  3983  
  3984  func (s) TestStreamServerInterceptor(t *testing.T) {
  3985  	for _, e := range listTestEnv() {
  3986  		// TODO(bradfitz): Temporarily skip this env due to #619.
  3987  		if e.name == "handler-tls" {
  3988  			continue
  3989  		}
  3990  		testStreamServerInterceptor(t, e)
  3991  	}
  3992  }
  3993  
  3994  func fullDuplexOnly(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
  3995  	if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" {
  3996  		return handler(srv, ss)
  3997  	}
  3998  	// Reject the other methods.
  3999  	return status.Error(codes.PermissionDenied, "")
  4000  }
  4001  
  4002  func testStreamServerInterceptor(t *testing.T, e env) {
  4003  	te := newTest(t, e)
  4004  	te.streamServerInt = fullDuplexOnly
  4005  	te.startServer(&testServer{security: e.security})
  4006  	defer te.tearDown()
  4007  
  4008  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  4009  	respParam := []*testpb.ResponseParameters{
  4010  		{
  4011  			Size: int32(1),
  4012  		},
  4013  	}
  4014  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
  4015  	if err != nil {
  4016  		t.Fatal(err)
  4017  	}
  4018  	req := &testpb.StreamingOutputCallRequest{
  4019  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4020  		ResponseParameters: respParam,
  4021  		Payload:            payload,
  4022  	}
  4023  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4024  	defer cancel()
  4025  	s1, err := tc.StreamingOutputCall(ctx, req)
  4026  	if err != nil {
  4027  		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, <nil>", tc, err)
  4028  	}
  4029  	if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied {
  4030  		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied)
  4031  	}
  4032  	s2, err := tc.FullDuplexCall(ctx)
  4033  	if err != nil {
  4034  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4035  	}
  4036  	if err := s2.Send(req); err != nil {
  4037  		t.Fatalf("%v.Send(_) = %v, want <nil>", s2, err)
  4038  	}
  4039  	if _, err := s2.Recv(); err != nil {
  4040  		t.Fatalf("%v.Recv() = _, %v, want _, <nil>", s2, err)
  4041  	}
  4042  }
  4043  
// funcServer implements methods of TestServiceServer using funcs,
// similar to an http.HandlerFunc.
// Any unimplemented method will crash. Tests implement the method(s)
// they need.
type funcServer struct {
	testgrpc.TestServiceServer
	// Per-method hooks; invoking a method whose hook is nil panics
	// (call of a nil func value).
	unaryCall          func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error)
	streamingInputCall func(stream testgrpc.TestService_StreamingInputCallServer) error
	fullDuplexCall     func(stream testgrpc.TestService_FullDuplexCallServer) error
}
  4054  
// UnaryCall delegates to the test-provided unaryCall hook.
func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	return s.unaryCall(ctx, in)
}
  4058  
// StreamingInputCall delegates to the test-provided streamingInputCall hook.
func (s *funcServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error {
	return s.streamingInputCall(stream)
}
  4062  
// FullDuplexCall delegates to the test-provided fullDuplexCall hook.
func (s *funcServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error {
	return s.fullDuplexCall(stream)
}
  4066  
  4067  func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) {
  4068  	for _, e := range listTestEnv() {
  4069  		testClientRequestBodyErrorUnexpectedEOF(t, e)
  4070  	}
  4071  }
  4072  
// testClientRequestBodyErrorUnexpectedEOF verifies the server survives a DATA
// frame that declares a 5-byte message (gRPC length prefix) but carries
// END_STREAM, so the body ends before the declared length.
func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) {
	te := newTest(t, e)
	// The handler must never run: the malformed body should be rejected
	// before the RPC is dispatched.
	ts := &funcServer{unaryCall: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		errUnexpectedCall := errors.New("unexpected call func server method")
		t.Error(errUnexpectedCall)
		return nil, errUnexpectedCall
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// Say we have 5 bytes coming, but set END_STREAM flag:
		st.writeData(1, true, []byte{0, 0, 0, 0, 5})
		st.wantAnyFrame() // wait for server to crash (it used to crash)
	})
}
  4089  
// TestClientRequestBodyErrorCloseAfterLength runs the close-after-length
// scenario against every configured test environment.
func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) {
	for _, e := range listTestEnv() {
		testClientRequestBodyErrorCloseAfterLength(t, e)
	}
}
  4095  
// Tests gRPC server's behavior when a gRPC client sends a frame with an invalid
// streamID. Per [HTTP/2 spec]: Streams initiated by a client MUST use
// odd-numbered stream identifiers. This test sets up a test server and sends a
// header frame with stream ID of 2. The test asserts that a subsequent read on
// the transport sends a GoAwayFrame with error code: PROTOCOL_ERROR.
//
// [HTTP/2 spec]: https://httpwg.org/specs/rfc7540.html#StreamIdentifiers
func (s) TestClientInvalidStreamID(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer lis.Close()
	s := grpc.NewServer()
	defer s.Stop()
	go s.Serve(lis)

	conn, err := net.DialTimeout("tcp", lis.Addr().String(), defaultTestTimeout)
	if err != nil {
		t.Fatalf("Failed to dial: %v", err)
	}
	st := newServerTesterFromConn(t, conn)
	st.greet()
	// Stream ID 2 is even and therefore illegal for a client-initiated stream.
	st.writeHeadersGRPC(2, "/grpc.testing.TestService/StreamingInputCall", true)
	goAwayFrame := st.wantGoAway(http2.ErrCodeProtocol)
	want := "received an illegal stream id: 2."
	if got := string(goAwayFrame.DebugData()); !strings.Contains(got, want) {
		t.Fatalf(" Received: %v, Expected error message to contain: %v.", got, want)
	}
}
  4126  
// TestInvalidStreamIDSmallerThanPrevious tests the server sends a GOAWAY frame
// with error code: PROTOCOL_ERROR when the streamID of the current frame is
// lower than the previous frames.
func (s) TestInvalidStreamIDSmallerThanPrevious(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer lis.Close()
	s := grpc.NewServer()
	defer s.Stop()
	go s.Serve(lis)

	conn, err := net.DialTimeout("tcp", lis.Addr().String(), defaultTestTimeout)
	if err != nil {
		t.Fatalf("Failed to dial: %v", err)
	}
	st := newServerTesterFromConn(t, conn)
	st.greet()
	// Open stream 3 first, then try to open stream 1: stream IDs must be
	// monotonically increasing within a connection.
	st.writeHeadersGRPC(3, "/grpc.testing.TestService/StreamingInputCall", true)
	st.wantAnyFrame()
	st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", true)
	goAwayFrame := st.wantGoAway(http2.ErrCodeProtocol)
	want := "received an illegal stream id: 1"
	if got := string(goAwayFrame.DebugData()); !strings.Contains(got, want) {
		t.Fatalf(" Received: %v, Expected error message to contain: %v.", got, want)
	}
}
  4155  
// testClientRequestBodyErrorCloseAfterLength verifies the server tolerates a
// client that promises a 5-byte message and then closes the connection
// without sending it; the handler must never be invoked.
func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) {
	te := newTest(t, e)
	te.declareLogNoise("Server.processUnaryRPC failed to write status")
	ts := &funcServer{unaryCall: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		errUnexpectedCall := errors.New("unexpected call func server method")
		t.Error(errUnexpectedCall)
		return nil, errUnexpectedCall
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// say we're sending 5 bytes, but then close the connection instead.
		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
		st.cc.Close()
	})
}
  4173  
// TestClientRequestBodyErrorCancel runs the cancel-before-body scenario
// against every configured test environment.
func (s) TestClientRequestBodyErrorCancel(t *testing.T) {
	for _, e := range listTestEnv() {
		testClientRequestBodyErrorCancel(t, e)
	}
}
  4179  
// testClientRequestBodyErrorCancel verifies that a stream reset (canceled) by
// the client before its body arrives never reaches the RPC handler, while a
// later stream on the same connection still does.
func testClientRequestBodyErrorCancel(t *testing.T, e env) {
	te := newTest(t, e)
	gotCall := make(chan bool, 1)
	ts := &funcServer{unaryCall: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
		gotCall <- true
		return new(testpb.SimpleResponse), nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall", false)
		// Say we have 5 bytes coming, but cancel it instead.
		st.writeRSTStream(1, http2.ErrCodeCancel)
		st.writeData(1, false, []byte{0, 0, 0, 0, 5})

		// Verify we didn't get a call yet.
		select {
		case <-gotCall:
			t.Fatal("unexpected call")
		default:
		}

		// And now send an uncanceled (but still invalid) request, just to get
		// a response.
		st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall", false)
		st.writeData(3, true, []byte{0, 0, 0, 0, 0})
		<-gotCall
		st.wantAnyFrame()
	})
}
  4209  
// TestClientRequestBodyErrorCancelStreamingInput runs the canceled
// streaming-input scenario against every configured test environment.
func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) {
	for _, e := range listTestEnv() {
		testClientRequestBodyErrorCancelStreamingInput(t, e)
	}
}
  4215  
  4216  func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) {
  4217  	te := newTest(t, e)
  4218  	recvErr := make(chan error, 1)
  4219  	ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error {
  4220  		_, err := stream.Recv()
  4221  		recvErr <- err
  4222  		return nil
  4223  	}}
  4224  	te.startServer(ts)
  4225  	defer te.tearDown()
  4226  	te.withServerTester(func(st *serverTester) {
  4227  		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
  4228  		// Say we have 5 bytes coming, but cancel it instead.
  4229  		st.writeData(1, false, []byte{0, 0, 0, 0, 5})
  4230  		st.writeRSTStream(1, http2.ErrCodeCancel)
  4231  
  4232  		var got error
  4233  		select {
  4234  		case got = <-recvErr:
  4235  		case <-time.After(3 * time.Second):
  4236  			t.Fatal("timeout waiting for error")
  4237  		}
  4238  		if grpc.Code(got) != codes.Canceled {
  4239  			t.Errorf("error = %#v; want error code %s", got, codes.Canceled)
  4240  		}
  4241  	})
  4242  }
  4243  
// TestClientInitialHeaderEndStream runs the headers-with-END_STREAM scenario
// against every configured test environment except the HTTP handler one
// (which uses a different server transport).
func (s) TestClientInitialHeaderEndStream(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.httpHandler {
			continue
		}
		testClientInitialHeaderEndStream(t, e)
	}
}
  4252  
// testClientInitialHeaderEndStream verifies that writing DATA after a HEADERS
// frame that carried END_STREAM draws an RST_STREAM(STREAM_CLOSED) from the
// server, and that the handler's Recv then fails with a Canceled status.
func testClientInitialHeaderEndStream(t *testing.T, e env) {
	// To ensure RST_STREAM is sent for illegal data write and not normal stream
	// close.
	frameCheckingDone := make(chan struct{})
	// To ensure goroutine for test does not end before RPC handler performs error
	// checking.
	handlerDone := make(chan struct{})
	te := newTest(t, e)
	ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error {
		defer close(handlerDone)
		// Block on serverTester receiving RST_STREAM. This ensures server has closed
		// stream before stream.Recv().
		<-frameCheckingDone
		data, err := stream.Recv()
		if err == nil {
			t.Errorf("unexpected data received in func server method: '%v'", data)
		} else if status.Code(err) != codes.Canceled {
			t.Errorf("expected canceled error, instead received '%v'", err)
		}
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		// Send a headers with END_STREAM flag, but then write data.
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", true)
		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
		st.wantAnyFrame()
		st.wantAnyFrame()
		st.wantRSTStream(http2.ErrCodeStreamClosed)
		close(frameCheckingDone)
		<-handlerDone
	})
}
  4287  
// TestClientSendDataAfterCloseSend runs the data-after-END_STREAM scenario
// against every configured test environment except the HTTP handler one.
func (s) TestClientSendDataAfterCloseSend(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.httpHandler {
			continue
		}
		testClientSendDataAfterCloseSend(t, e)
	}
}
  4296  
// testClientSendDataAfterCloseSend verifies that sending more DATA after a
// DATA frame with END_STREAM draws an RST_STREAM(STREAM_CLOSED), and that the
// handler's subsequent Recv/SendMsg calls observe a Canceled status (Recv may
// also drain the one valid message with io.EOF first).
func testClientSendDataAfterCloseSend(t *testing.T, e env) {
	// To ensure RST_STREAM is sent for illegal data write prior to execution of RPC
	// handler.
	frameCheckingDone := make(chan struct{})
	// To ensure goroutine for test does not end before RPC handler performs error
	// checking.
	handlerDone := make(chan struct{})
	te := newTest(t, e)
	ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error {
		defer close(handlerDone)
		// Block on serverTester receiving RST_STREAM. This ensures server has closed
		// stream before stream.Recv().
		<-frameCheckingDone
		for {
			_, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				if status.Code(err) != codes.Canceled {
					t.Errorf("expected canceled error, instead received '%v'", err)
				}
				break
			}
		}
		if err := stream.SendMsg(nil); err == nil {
			t.Error("expected error sending message on stream after stream closed due to illegal data")
		} else if status.Code(err) != codes.Canceled {
			t.Errorf("expected cancel error, instead received '%v'", err)
		}
		return nil
	}}
	te.startServer(ts)
	defer te.tearDown()
	te.withServerTester(func(st *serverTester) {
		st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false)
		// Send data with END_STREAM flag, but then write more data.
		st.writeData(1, true, []byte{0, 0, 0, 0, 0})
		st.writeData(1, false, []byte{0, 0, 0, 0, 0})
		st.wantAnyFrame()
		st.wantAnyFrame()
		st.wantRSTStream(http2.ErrCodeStreamClosed)
		close(frameCheckingDone)
		<-handlerDone
	})
}
  4343  
// TestClientResourceExhaustedCancelFullDuplex runs the resource-exhausted
// cancellation scenario against every flow-control-capable environment.
func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.httpHandler {
			// httpHandler write won't be blocked on flow control window.
			continue
		}
		testClientResourceExhaustedCancelFullDuplex(t, e)
	}
}
  4353  
// testClientResourceExhaustedCancelFullDuplex verifies that when the client
// rejects a server message as too large (ResourceExhausted), the resulting
// stream cancellation propagates to the server, unblocking its Send loop with
// a Canceled error rather than deadlocking on flow control.
func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) {
	te := newTest(t, e)
	recvErr := make(chan error, 1)
	ts := &funcServer{fullDuplexCall: func(stream testgrpc.TestService_FullDuplexCallServer) error {
		defer close(recvErr)
		_, err := stream.Recv()
		if err != nil {
			return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want <nil>", err)
		}
		// create a payload that's larger than the default flow control window.
		payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10)
		if err != nil {
			return err
		}
		resp := &testpb.StreamingOutputCallResponse{
			Payload: payload,
		}
		ce := make(chan error, 1)
		// Send in a separate goroutine: Send blocks on flow control once the
		// client stops reading, and must be unblocked by the cancellation.
		go func() {
			var err error
			for {
				if err = stream.Send(resp); err != nil {
					break
				}
			}
			ce <- err
		}()
		select {
		case err = <-ce:
		case <-time.After(10 * time.Second):
			err = errors.New("10s timeout reached")
		}
		recvErr <- err
		return err
	}}
	te.startServer(ts)
	defer te.tearDown()
	// set a low limit on receive message size to error with Resource Exhausted on
	// client side when server send a large message.
	te.maxClientReceiveMsgSize = newInt(10)
	cc := te.clientConn()
	tc := testgrpc.NewTestServiceClient(cc)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	req := &testpb.StreamingOutputCallRequest{}
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
	}
	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
	err = <-recvErr
	if status.Code(err) != codes.Canceled {
		t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled)
	}
}
  4415  
// clientFailCreds is a credentials.TransportCredentials implementation whose
// client-side handshake always fails with a fatal (non-temporary) error,
// while the server-side handshake is a no-op pass-through.
type clientFailCreds struct{}

// ServerHandshake succeeds trivially, returning the raw connection unchanged.
func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return rawConn, nil, nil
}

// ClientHandshake always fails, simulating a fatal handshake error.
func (c *clientFailCreds) ClientHandshake(context.Context, string, net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, fmt.Errorf("client handshake fails with fatal error")
}
func (c *clientFailCreds) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{}
}
func (c *clientFailCreds) Clone() credentials.TransportCredentials {
	return c
}
func (c *clientFailCreds) OverrideServerName(string) error {
	return nil
}
  4433  
// This test makes sure that failfast RPCs fail if client handshake fails with
// fatal errors.
func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer lis.Close()

	cc, err := grpc.NewClient("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{}))
	if err != nil {
		t.Fatalf("grpc.NewClient(_) = %v", err)
	}
	defer cc.Close()

	tc := testgrpc.NewTestServiceClient(cc)
	// This unary call should fail, but not timeout.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want <Unavailable>", err)
	}
}
  4457  
func (s) TestFlowControlLogicalRace(t *testing.T) {
	// Test for a regression of https://github.com/grpc/grpc-go/issues/632,
	// and other flow control bugs.

	const (
		itemCount   = 100     // messages the server streams per call
		itemSize    = 1 << 10 // bytes per message
		recvCount   = 2       // messages the client reads before canceling
		maxFailures = 3       // abort after this many short streams
	)

	requestCount := 3000
	if raceMode {
		// The race detector slows everything down; use fewer iterations.
		requestCount = 1000
	}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	defer lis.Close()

	s := grpc.NewServer()
	testgrpc.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{
		itemCount: itemCount,
		itemSize:  itemSize,
	})
	defer s.Stop()

	go s.Serve(lis)

	cc, err := grpc.NewClient(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("grpc.NewClient(%q) = %v", lis.Addr().String(), err)
	}
	defer cc.Close()
	cl := testgrpc.NewTestServiceClient(cc)

	failures := 0
	for i := 0; i < requestCount; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
		output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
		if err != nil {
			t.Fatalf("StreamingOutputCall; err = %q", err)
		}

		for j := 0; j < recvCount; j++ {
			if _, err := output.Recv(); err != nil {
				if err == io.EOF || status.Code(err) == codes.DeadlineExceeded {
					// Stream ended before we read recvCount messages.
					t.Errorf("got %d responses to request %d", j, i)
					failures++
					break
				}
				t.Fatalf("Recv; err = %q", err)
			}
		}
		cancel()

		if failures >= maxFailures {
			// Continue past the first failure to see if the connection is
			// entirely broken, or if only a single RPC was affected
			t.Fatalf("Too many failures received; aborting")
		}
	}
}
  4523  
// flowControlLogicalRaceServer streams itemCount messages of itemSize bytes
// each, used by TestFlowControlLogicalRace to stress flow control.
type flowControlLogicalRaceServer struct {
	testgrpc.TestServiceServer

	itemSize  int // bytes per streamed message
	itemCount int // messages per call
}

// StreamingOutputCall sends itemCount responses of itemSize bytes each.
func (s *flowControlLogicalRaceServer) StreamingOutputCall(_ *testpb.StreamingOutputCallRequest, srv testgrpc.TestService_StreamingOutputCallServer) error {
	for i := 0; i < s.itemCount; i++ {
		err := srv.Send(&testpb.StreamingOutputCallResponse{
			Payload: &testpb.Payload{
				// Sending a large stream of data which the client rejects
				// helps to trigger some types of flow control bugs.
				//
				// Reallocating memory here is inefficient, but the stress it
				// puts on the GC leads to more frequent flow control
				// failures. The GC likely causes more variety in the
				// goroutine scheduling orders.
				Body: bytes.Repeat([]byte("a"), s.itemSize),
			},
		})
		if err != nil {
			return err
		}
	}
	return nil
}
  4551  
// lockingWriter is an io.Writer whose underlying writer can be swapped
// concurrently with writes; all access is serialized by mu.
type lockingWriter struct {
	mu sync.Mutex
	w  io.Writer
}

// Write forwards p to the current underlying writer under the lock.
func (lw *lockingWriter) Write(p []byte) (n int, err error) {
	lw.mu.Lock()
	defer lw.mu.Unlock()
	return lw.w.Write(p)
}

// setWriter atomically replaces the underlying writer.
func (lw *lockingWriter) setWriter(w io.Writer) {
	lw.mu.Lock()
	defer lw.mu.Unlock()
	lw.w = w
}
  4568  
// testLogOutput is the shared log sink used by the log-filtering helpers
// below; declareLogNoise swaps its underlying writer for a filterWriter.
var testLogOutput = &lockingWriter{w: os.Stderr}
  4570  
// awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to
// terminate, if they're still running. It spams logs with this
// message.  We wait for it so our log filter is still
// active. Otherwise the "defer restore()" at the top of various test
// functions restores our log filter and then the goroutine spams.
func awaitNewConnLogOutput() {
	awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry")
}
  4579  
  4580  func awaitLogOutput(maxWait time.Duration, phrase string) {
  4581  	pb := []byte(phrase)
  4582  
  4583  	timer := time.NewTimer(maxWait)
  4584  	defer timer.Stop()
  4585  	wakeup := make(chan bool, 1)
  4586  	for {
  4587  		if logOutputHasContents(pb, wakeup) {
  4588  			return
  4589  		}
  4590  		select {
  4591  		case <-timer.C:
  4592  			// Too slow. Oh well.
  4593  			return
  4594  		case <-wakeup:
  4595  		}
  4596  	}
  4597  }
  4598  
// logOutputHasContents reports whether v appears in the filterWriter's
// buffer. If the current writer is not a filterWriter it returns false. When
// v is absent, it installs wakeup so the next Write notifies the caller.
func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
	testLogOutput.mu.Lock()
	defer testLogOutput.mu.Unlock()
	fw, ok := testLogOutput.w.(*filterWriter)
	if !ok {
		// Filtering is not active (e.g. -verbose_logs); nothing is captured.
		return false
	}
	fw.mu.Lock()
	defer fw.mu.Unlock()
	if bytes.Contains(fw.buf.Bytes(), v) {
		return true
	}
	fw.wakeup = wakeup
	return false
}
  4614  
// verboseLogs disables the log filtering in declareLogNoise when set.
var verboseLogs = flag.Bool("verbose_logs", false, "show all log output, without filtering")
  4616  
// noop is the no-op restore function returned when filtering is disabled.
func noop() {}
  4618  
// declareLogNoise declares that t is expected to emit the following noisy
// phrases, even on success. Those phrases will be filtered from log output and
// only be shown if *verbose_logs or t ends up failing. The returned restore
// function should be called with defer to be run before the test ends.
func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
	if *verboseLogs {
		return noop
	}
	fw := &filterWriter{dst: os.Stderr, filter: phrases}
	testLogOutput.setWriter(fw)
	return func() {
		if t.Failed() {
			fw.mu.Lock()
			defer fw.mu.Unlock()
			// On failure, dump everything that was captured, including
			// the lines that were filtered out.
			if fw.buf.Len() > 0 {
				t.Logf("Complete log output:\n%s", fw.buf.Bytes())
			}
		}
		testLogOutput.setWriter(os.Stderr)
	}
}
  4640  
// filterWriter captures everything written to it in buf, and forwards to dst
// only the writes that do not contain one of the filter phrases.
type filterWriter struct {
	dst    io.Writer
	filter []string

	mu     sync.Mutex
	buf    bytes.Buffer
	wakeup chan<- bool // if non-nil, gets true on write
}
  4649  
  4650  func (fw *filterWriter) Write(p []byte) (n int, err error) {
  4651  	fw.mu.Lock()
  4652  	fw.buf.Write(p)
  4653  	if fw.wakeup != nil {
  4654  		select {
  4655  		case fw.wakeup <- true:
  4656  		default:
  4657  		}
  4658  	}
  4659  	fw.mu.Unlock()
  4660  
  4661  	ps := string(p)
  4662  	for _, f := range fw.filter {
  4663  		if strings.Contains(ps, f) {
  4664  			return len(p), nil
  4665  		}
  4666  	}
  4667  	return fw.dst.Write(p)
  4668  }
  4669  
// TestGRPCMethod verifies that grpc.Method reports the full method name of
// the in-flight RPC inside a server handler.
func (s) TestGRPCMethod(t *testing.T) {
	var method string
	var ok bool

	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
			method, ok = grpc.Method(ctx)
			return &testpb.Empty{}, nil
		},
	}
	if err := ss.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err)
	}

	if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want {
		t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want)
	}
}
  4696  
// TestUnaryProxyDoesNotForwardMetadata verifies that incoming metadata is not
// implicitly forwarded when a server handler makes an outgoing unary RPC with
// the same context.
func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) {
	const mdkey = "somedata"

	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
	endpoint := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
				return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
	// without explicitly copying the metadata.
	proxy := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
				return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey)
			}
			return endpoint.Client.EmptyCall(ctx, in)
		},
	}
	if err := proxy.Start(nil); err != nil {
		t.Fatalf("Error starting proxy server: %v", err)
	}
	defer proxy.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	md := metadata.Pairs(mdkey, "val")
	ctx = metadata.NewOutgoingContext(ctx, md)

	// Sanity check that endpoint properly errors when it sees mdkey.
	_, err := endpoint.Client.EmptyCall(ctx, &testpb.Empty{})
	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
		t.Fatalf("endpoint.Client.EmptyCall(_, _) = _, %v; want _, <status with Code()=Internal>", err)
	}

	if _, err := proxy.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatal(err.Error())
	}
}
  4744  
  4745  func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) {
  4746  	const mdkey = "somedata"
  4747  
  4748  	// doFDC performs a FullDuplexCall with client and returns the error from the
  4749  	// first stream.Recv call, or nil if that error is io.EOF.  Calls t.Fatal if
  4750  	// the stream cannot be established.
  4751  	doFDC := func(ctx context.Context, client testgrpc.TestServiceClient) error {
  4752  		stream, err := client.FullDuplexCall(ctx)
  4753  		if err != nil {
  4754  			t.Fatalf("Unwanted error: %v", err)
  4755  		}
  4756  		if _, err := stream.Recv(); err != io.EOF {
  4757  			return err
  4758  		}
  4759  		return nil
  4760  	}
  4761  
  4762  	// endpoint ensures mdkey is NOT in metadata and returns an error if it is.
  4763  	endpoint := &stubserver.StubServer{
  4764  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  4765  			ctx := stream.Context()
  4766  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil {
  4767  				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
  4768  			}
  4769  			return nil
  4770  		},
  4771  	}
  4772  	if err := endpoint.Start(nil); err != nil {
  4773  		t.Fatalf("Error starting endpoint server: %v", err)
  4774  	}
  4775  	defer endpoint.Stop()
  4776  
  4777  	// proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint
  4778  	// without explicitly copying the metadata.
  4779  	proxy := &stubserver.StubServer{
  4780  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  4781  			ctx := stream.Context()
  4782  			if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil {
  4783  				return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey)
  4784  			}
  4785  			return doFDC(ctx, endpoint.Client)
  4786  		},
  4787  	}
  4788  	if err := proxy.Start(nil); err != nil {
  4789  		t.Fatalf("Error starting proxy server: %v", err)
  4790  	}
  4791  	defer proxy.Stop()
  4792  
  4793  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4794  	defer cancel()
  4795  	md := metadata.Pairs(mdkey, "val")
  4796  	ctx = metadata.NewOutgoingContext(ctx, md)
  4797  
  4798  	// Sanity check that endpoint properly errors when it sees mdkey in ctx.
  4799  	err := doFDC(ctx, endpoint.Client)
  4800  	if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal {
  4801  		t.Fatalf("stream.Recv() = _, %v; want _, <status with Code()=Internal>", err)
  4802  	}
  4803  
  4804  	if err := doFDC(ctx, proxy.Client); err != nil {
  4805  		t.Fatalf("doFDC(_, proxy.Client) = %v; want nil", err)
  4806  	}
  4807  }
  4808  
// TestStatsTagsAndTrace verifies that the grpc-tags-bin and grpc-trace-bin
// metadata set on the client context arrive at the server, and that an RPC
// only succeeds when both keys carry the expected values.
func (s) TestStatsTagsAndTrace(t *testing.T) {
	const mdTraceKey = "grpc-trace-bin"
	const mdTagsKey = "grpc-tags-bin"

	setTrace := func(ctx context.Context, b []byte) context.Context {
		return metadata.AppendToOutgoingContext(ctx, mdTraceKey, string(b))
	}
	setTags := func(ctx context.Context, b []byte) context.Context {
		return metadata.AppendToOutgoingContext(ctx, mdTagsKey, string(b))
	}

	// Data added to context by client (typically in a stats handler).
	tags := []byte{1, 5, 2, 4, 3}
	trace := []byte{5, 2, 1, 3, 4}

	// endpoint ensures Tags() and Trace() in context match those that were added
	// by the client and returns an error if not.
	endpoint := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
			md, _ := metadata.FromIncomingContext(ctx)
			// Compare against the last value for each key, since
			// AppendToOutgoingContext may accumulate entries.
			if md[mdTagsKey] == nil || !cmp.Equal(md[mdTagsKey][len(md[mdTagsKey])-1], string(tags)) {
				return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md[mdTagsKey], tags)
			}
			if md[mdTraceKey] == nil || !cmp.Equal(md[mdTraceKey][len(md[mdTraceKey])-1], string(trace)) {
				return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md[mdTraceKey], trace)
			}
			return &testpb.Empty{}, nil
		},
	}
	if err := endpoint.Start(nil); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer endpoint.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()

	testCases := []struct {
		ctx  context.Context
		want codes.Code
	}{
		{ctx: ctx, want: codes.Internal},
		{ctx: setTags(ctx, tags), want: codes.Internal},
		{ctx: setTrace(ctx, trace), want: codes.Internal},
		{ctx: setTags(setTrace(ctx, tags), tags), want: codes.Internal},
		{ctx: setTags(setTrace(ctx, trace), tags), want: codes.OK},
	}

	for _, tc := range testCases {
		_, err := endpoint.Client.EmptyCall(tc.ctx, &testpb.Empty{})
		if tc.want == codes.OK && err != nil {
			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err)
		}
		if s, ok := status.FromError(err); !ok || s.Code() != tc.want {
			t.Fatalf("endpoint.Client.EmptyCall(%v, _) = _, %v; want _, <status with Code()=%v>", tc.ctx, err, tc.want)
		}
	}
}
  4867  
// TestTapTimeout verifies that canceling the context returned by an
// InTapHandle cancels the RPC handler's context, and that the client observes
// a Canceled status rather than its own deadline expiring.
func (s) TestTapTimeout(t *testing.T) {
	sopts := []grpc.ServerOption{
		grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) {
			c, cancel := context.WithCancel(ctx)
			// Call cancel instead of setting a deadline so we can detect which error
			// occurred -- this cancellation (desired) or the client's deadline
			// expired (indicating this cancellation did not affect the RPC).
			time.AfterFunc(10*time.Millisecond, cancel)
			return c, nil
		}),
	}

	ss := &stubserver.StubServer{
		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
			<-ctx.Done()
			return nil, status.Error(codes.Canceled, ctx.Err().Error())
		},
	}
	if err := ss.Start(sopts); err != nil {
		t.Fatalf("Error starting endpoint server: %v", err)
	}
	defer ss.Stop()

	// This was known to be flaky; test several times.
	for i := 0; i < 10; i++ {
		// Set our own deadline in case the server hangs.
		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
		res, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
		cancel()
		if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled {
			t.Fatalf("ss.Client.EmptyCall(ctx, _) = %v, %v; want nil, <status with Code()=Canceled>", res, err)
		}
	}

}
  4903  
  4904  func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) {
  4905  	ss := &stubserver.StubServer{
  4906  		FullDuplexCallF: func(testgrpc.TestService_FullDuplexCallServer) error {
  4907  			return status.Errorf(codes.Internal, "")
  4908  		},
  4909  	}
  4910  	sopts := []grpc.ServerOption{}
  4911  	if err := ss.Start(sopts); err != nil {
  4912  		t.Fatalf("Error starting endpoint server: %v", err)
  4913  	}
  4914  	defer ss.Stop()
  4915  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4916  	defer cancel()
  4917  	stream, err := ss.Client.FullDuplexCall(ctx)
  4918  	if err != nil {
  4919  		t.Fatalf("Error while creating stream: %v", err)
  4920  	}
  4921  	for {
  4922  		if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil {
  4923  			time.Sleep(5 * time.Millisecond)
  4924  		} else if err == io.EOF {
  4925  			break // Success.
  4926  		} else {
  4927  			t.Fatalf("stream.Send(_) = %v, want io.EOF", err)
  4928  		}
  4929  	}
  4930  }
  4931  
// windowSizeConfig bundles the four HTTP/2 flow-control window sizes
// (per-stream and per-connection, for both server and client) that the
// configurable-window-size tests apply to the test environment.
type windowSizeConfig struct {
	serverStream int32 // server's initial per-stream window size
	serverConn   int32 // server's initial connection-level window size
	clientStream int32 // client's initial per-stream window size
	clientConn   int32 // client's initial connection-level window size
}
  4938  
  4939  func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) {
  4940  	wc := windowSizeConfig{
  4941  		serverStream: 8 * 1024 * 1024,
  4942  		serverConn:   12 * 1024 * 1024,
  4943  		clientStream: 6 * 1024 * 1024,
  4944  		clientConn:   8 * 1024 * 1024,
  4945  	}
  4946  	for _, e := range listTestEnv() {
  4947  		testConfigurableWindowSize(t, e, wc)
  4948  	}
  4949  }
  4950  
  4951  func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) {
  4952  	wc := windowSizeConfig{
  4953  		serverStream: 1,
  4954  		serverConn:   1,
  4955  		clientStream: 1,
  4956  		clientConn:   1,
  4957  	}
  4958  	for _, e := range listTestEnv() {
  4959  		testConfigurableWindowSize(t, e, wc)
  4960  	}
  4961  }
  4962  
  4963  func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) {
  4964  	te := newTest(t, e)
  4965  	te.serverInitialWindowSize = wc.serverStream
  4966  	te.serverInitialConnWindowSize = wc.serverConn
  4967  	te.clientInitialWindowSize = wc.clientStream
  4968  	te.clientInitialConnWindowSize = wc.clientConn
  4969  
  4970  	te.startServer(&testServer{security: e.security})
  4971  	defer te.tearDown()
  4972  
  4973  	cc := te.clientConn()
  4974  	tc := testgrpc.NewTestServiceClient(cc)
  4975  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  4976  	defer cancel()
  4977  	stream, err := tc.FullDuplexCall(ctx)
  4978  	if err != nil {
  4979  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  4980  	}
  4981  	numOfIter := 11
  4982  	// Set message size to exhaust largest of window sizes.
  4983  	messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1)
  4984  	messageSize = max(messageSize, 64*1024)
  4985  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize)
  4986  	if err != nil {
  4987  		t.Fatal(err)
  4988  	}
  4989  	respParams := []*testpb.ResponseParameters{
  4990  		{
  4991  			Size: messageSize,
  4992  		},
  4993  	}
  4994  	req := &testpb.StreamingOutputCallRequest{
  4995  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  4996  		ResponseParameters: respParams,
  4997  		Payload:            payload,
  4998  	}
  4999  	for i := 0; i < numOfIter; i++ {
  5000  		if err := stream.Send(req); err != nil {
  5001  			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
  5002  		}
  5003  		if _, err := stream.Recv(); err != nil {
  5004  			t.Fatalf("%v.Recv() = _, %v, want _, <nil>", stream, err)
  5005  		}
  5006  	}
  5007  	if err := stream.CloseSend(); err != nil {
  5008  		t.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
  5009  	}
  5010  }
  5011  
  5012  func (s) TestWaitForReadyConnection(t *testing.T) {
  5013  	for _, e := range listTestEnv() {
  5014  		testWaitForReadyConnection(t, e)
  5015  	}
  5016  
  5017  }
  5018  
  5019  func testWaitForReadyConnection(t *testing.T, e env) {
  5020  	te := newTest(t, e)
  5021  	te.userAgent = testAppUA
  5022  	te.startServer(&testServer{security: e.security})
  5023  	defer te.tearDown()
  5024  
  5025  	cc := te.clientConn() // Non-blocking dial.
  5026  	tc := testgrpc.NewTestServiceClient(cc)
  5027  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5028  	defer cancel()
  5029  	testutils.AwaitState(ctx, t, cc, connectivity.Ready)
  5030  	// Make a fail-fast RPC.
  5031  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5032  		t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err)
  5033  	}
  5034  }
  5035  
  5036  func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) {
  5037  	for _, e := range listTestEnv() {
  5038  		testSvrWriteStatusEarlyWrite(t, e)
  5039  	}
  5040  }
  5041  
  5042  func testSvrWriteStatusEarlyWrite(t *testing.T, e env) {
  5043  	te := newTest(t, e)
  5044  	const smallSize = 1024
  5045  	const largeSize = 2048
  5046  	const extraLargeSize = 4096
  5047  	te.maxServerReceiveMsgSize = newInt(largeSize)
  5048  	te.maxServerSendMsgSize = newInt(largeSize)
  5049  	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
  5050  	if err != nil {
  5051  		t.Fatal(err)
  5052  	}
  5053  	extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize)
  5054  	if err != nil {
  5055  		t.Fatal(err)
  5056  	}
  5057  	te.startServer(&testServer{security: e.security})
  5058  	defer te.tearDown()
  5059  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  5060  	respParam := []*testpb.ResponseParameters{
  5061  		{
  5062  			Size: int32(smallSize),
  5063  		},
  5064  	}
  5065  	sreq := &testpb.StreamingOutputCallRequest{
  5066  		ResponseType:       testpb.PayloadType_COMPRESSABLE,
  5067  		ResponseParameters: respParam,
  5068  		Payload:            extraLargePayload,
  5069  	}
  5070  	// Test recv case: server receives a message larger than maxServerReceiveMsgSize.
  5071  	stream, err := tc.FullDuplexCall(te.ctx)
  5072  	if err != nil {
  5073  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  5074  	}
  5075  	if err = stream.Send(sreq); err != nil {
  5076  		t.Fatalf("%v.Send() = _, %v, want <nil>", stream, err)
  5077  	}
  5078  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  5079  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  5080  	}
  5081  	// Test send case: server sends a message larger than maxServerSendMsgSize.
  5082  	sreq.Payload = smallPayload
  5083  	respParam[0].Size = int32(extraLargeSize)
  5084  
  5085  	stream, err = tc.FullDuplexCall(te.ctx)
  5086  	if err != nil {
  5087  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
  5088  	}
  5089  	if err = stream.Send(sreq); err != nil {
  5090  		t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, sreq, err)
  5091  	}
  5092  	if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
  5093  		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
  5094  	}
  5095  }
  5096  
  5097  // TestMalformedStreamMethod starts a test server and sends an RPC with a
  5098  // malformed method name. The server should respond with an UNIMPLEMENTED status
  5099  // code in this case.
  5100  func (s) TestMalformedStreamMethod(t *testing.T) {
  5101  	const testMethod = "a-method-name-without-any-slashes"
  5102  	te := newTest(t, tcpClearRREnv)
  5103  	te.startServer(nil)
  5104  	defer te.tearDown()
  5105  
  5106  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5107  	defer cancel()
  5108  	err := te.clientConn().Invoke(ctx, testMethod, nil, nil)
  5109  	if gotCode := status.Code(err); gotCode != codes.Unimplemented {
  5110  		t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented)
  5111  	}
  5112  }
  5113  
  5114  func (s) TestMethodFromServerStream(t *testing.T) {
  5115  	const testMethod = "/package.service/method"
  5116  	e := tcpClearRREnv
  5117  	te := newTest(t, e)
  5118  	var method string
  5119  	var ok bool
  5120  	te.unknownHandler = func(_ any, stream grpc.ServerStream) error {
  5121  		method, ok = grpc.MethodFromServerStream(stream)
  5122  		return nil
  5123  	}
  5124  
  5125  	te.startServer(nil)
  5126  	defer te.tearDown()
  5127  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5128  	defer cancel()
  5129  	_ = te.clientConn().Invoke(ctx, testMethod, nil, nil)
  5130  	if !ok || method != testMethod {
  5131  		t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod)
  5132  	}
  5133  }
  5134  
// TestInterceptorCanAccessCallOptions verifies that unary and stream client
// interceptors are handed every CallOption applied to an RPC, including
// defaults installed via grpc.WithDefaultCallOptions.
func (s) TestInterceptorCanAccessCallOptions(t *testing.T) {
	e := tcpClearRREnv
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	// observedOptions collects, bucketed by option type, the values the
	// interceptors saw.
	type observedOptions struct {
		headers     []*metadata.MD
		trailers    []*metadata.MD
		peer        []*peer.Peer
		creds       []credentials.PerRPCCredentials
		failFast    []bool
		maxRecvSize []int
		maxSendSize []int
		compressor  []string
		subtype     []string
	}
	var observedOpts observedOptions
	// populateOpts sorts each recognized CallOption into the matching bucket;
	// unrecognized options are ignored.
	populateOpts := func(opts []grpc.CallOption) {
		for _, o := range opts {
			switch o := o.(type) {
			case grpc.HeaderCallOption:
				observedOpts.headers = append(observedOpts.headers, o.HeaderAddr)
			case grpc.TrailerCallOption:
				observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr)
			case grpc.PeerCallOption:
				observedOpts.peer = append(observedOpts.peer, o.PeerAddr)
			case grpc.PerRPCCredsCallOption:
				observedOpts.creds = append(observedOpts.creds, o.Creds)
			case grpc.FailFastCallOption:
				observedOpts.failFast = append(observedOpts.failFast, o.FailFast)
			case grpc.MaxRecvMsgSizeCallOption:
				observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize)
			case grpc.MaxSendMsgSizeCallOption:
				observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize)
			case grpc.CompressorCallOption:
				observedOpts.compressor = append(observedOpts.compressor, o.CompressorType)
			case grpc.ContentSubtypeCallOption:
				observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype)
			}
		}
	}

	// Both interceptors short-circuit the RPC: they record the options and
	// never invoke the actual call.
	te.unaryClientInt = func(_ context.Context, _ string, _, _ any, _ *grpc.ClientConn, _ grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		populateOpts(opts)
		return nil
	}
	te.streamClientInt = func(_ context.Context, _ *grpc.StreamDesc, _ *grpc.ClientConn, _ string, _ grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		populateOpts(opts)
		return nil, nil
	}

	defaults := []grpc.CallOption{
		grpc.WaitForReady(true),
		grpc.MaxCallRecvMsgSize(1010),
	}
	tc := testgrpc.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...)))

	var headers metadata.MD
	var trailers metadata.MD
	var pr peer.Peer
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	tc.UnaryCall(ctx, &testpb.SimpleRequest{},
		grpc.MaxCallRecvMsgSize(100),
		grpc.MaxCallSendMsgSize(200),
		grpc.PerRPCCredentials(testPerRPCCredentials{}),
		grpc.Header(&headers),
		grpc.Trailer(&trailers),
		grpc.Peer(&pr))
	// Defaults are expected before per-call options: both MaxCallRecvMsgSize
	// values appear, the default 1010 first, then the per-call 100.
	expected := observedOptions{
		failFast:    []bool{false},
		maxRecvSize: []int{1010, 100},
		maxSendSize: []int{200},
		creds:       []credentials.PerRPCCredentials{testPerRPCCredentials{}},
		headers:     []*metadata.MD{&headers},
		trailers:    []*metadata.MD{&trailers},
		peer:        []*peer.Peer{&pr},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}

	observedOpts = observedOptions{} // reset

	tc.StreamingInputCall(ctx,
		grpc.WaitForReady(false),
		grpc.MaxCallSendMsgSize(2020),
		grpc.UseCompressor("comp-type"),
		grpc.CallContentSubtype("json"))
	// WaitForReady is seen twice: the default (true -> failFast false), then
	// the per-call override (false -> failFast true).
	expected = observedOptions{
		failFast:    []bool{false, true},
		maxRecvSize: []int{1010},
		maxSendSize: []int{2020},
		compressor:  []string{"comp-type"},
		subtype:     []string{"json"},
	}

	if !reflect.DeepEqual(expected, observedOpts) {
		t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts)
	}
}
  5238  
  5239  func (s) TestServeExitsWhenListenerClosed(t *testing.T) {
  5240  	ss := &stubserver.StubServer{
  5241  		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
  5242  			return &testpb.Empty{}, nil
  5243  		},
  5244  	}
  5245  
  5246  	s := grpc.NewServer()
  5247  	defer s.Stop()
  5248  	testgrpc.RegisterTestServiceServer(s, ss)
  5249  
  5250  	lis, err := net.Listen("tcp", "localhost:0")
  5251  	if err != nil {
  5252  		t.Fatalf("Failed to create listener: %v", err)
  5253  	}
  5254  
  5255  	done := make(chan struct{})
  5256  	go func() {
  5257  		s.Serve(lis)
  5258  		close(done)
  5259  	}()
  5260  
  5261  	cc, err := grpc.NewClient(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
  5262  	if err != nil {
  5263  		t.Fatalf("Failed to create a client for server: %v", err)
  5264  	}
  5265  	defer cc.Close()
  5266  	c := testgrpc.NewTestServiceClient(cc)
  5267  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5268  	defer cancel()
  5269  	if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil {
  5270  		t.Fatalf("Failed to send test RPC to server: %v", err)
  5271  	}
  5272  
  5273  	if err := lis.Close(); err != nil {
  5274  		t.Fatalf("Failed to close listener: %v", err)
  5275  	}
  5276  	const timeout = 5 * time.Second
  5277  	timer := time.NewTimer(timeout)
  5278  	select {
  5279  	case <-done:
  5280  		return
  5281  	case <-timer.C:
  5282  		t.Fatalf("Serve did not return after %v", timeout)
  5283  	}
  5284  }
  5285  
  5286  // Service handler returns status with invalid utf8 message.
  5287  func (s) TestStatusInvalidUTF8Message(t *testing.T) {
  5288  	var (
  5289  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  5290  		wantMsg = "���"
  5291  	)
  5292  
  5293  	ss := &stubserver.StubServer{
  5294  		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
  5295  			return nil, status.Error(codes.Internal, origMsg)
  5296  		},
  5297  	}
  5298  	if err := ss.Start(nil); err != nil {
  5299  		t.Fatalf("Error starting endpoint server: %v", err)
  5300  	}
  5301  	defer ss.Stop()
  5302  
  5303  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5304  	defer cancel()
  5305  
  5306  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg {
  5307  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg)
  5308  	}
  5309  }
  5310  
  5311  // Service handler returns status with details and invalid utf8 message. Proto
  5312  // will fail to marshal the status because of the invalid utf8 message. Details
  5313  // will be dropped when sending.
  5314  func (s) TestStatusInvalidUTF8Details(t *testing.T) {
  5315  	grpctest.TLogger.ExpectError("Failed to marshal rpc status")
  5316  
  5317  	var (
  5318  		origMsg = string([]byte{0xff, 0xfe, 0xfd})
  5319  		wantMsg = "���"
  5320  	)
  5321  
  5322  	ss := &stubserver.StubServer{
  5323  		EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) {
  5324  			st := status.New(codes.Internal, origMsg)
  5325  			st, err := st.WithDetails(&testpb.Empty{})
  5326  			if err != nil {
  5327  				return nil, err
  5328  			}
  5329  			return nil, st.Err()
  5330  		},
  5331  	}
  5332  	if err := ss.Start(nil); err != nil {
  5333  		t.Fatalf("Error starting endpoint server: %v", err)
  5334  	}
  5335  	defer ss.Stop()
  5336  
  5337  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5338  	defer cancel()
  5339  
  5340  	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
  5341  	st := status.Convert(err)
  5342  	if st.Message() != wantMsg {
  5343  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg)
  5344  	}
  5345  	if len(st.Details()) != 0 {
  5346  		// Details should be dropped on the server side.
  5347  		t.Fatalf("RPC status contain details: %v, want no details", st.Details())
  5348  	}
  5349  }
  5350  
  5351  func (s) TestRPCTimeout(t *testing.T) {
  5352  	for _, e := range listTestEnv() {
  5353  		testRPCTimeout(t, e)
  5354  	}
  5355  }
  5356  
  5357  func testRPCTimeout(t *testing.T, e env) {
  5358  	te := newTest(t, e)
  5359  	te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond})
  5360  	defer te.tearDown()
  5361  
  5362  	cc := te.clientConn()
  5363  	tc := testgrpc.NewTestServiceClient(cc)
  5364  
  5365  	const argSize = 2718
  5366  	const respSize = 314
  5367  
  5368  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
  5369  	if err != nil {
  5370  		t.Fatal(err)
  5371  	}
  5372  
  5373  	req := &testpb.SimpleRequest{
  5374  		ResponseType: testpb.PayloadType_COMPRESSABLE,
  5375  		ResponseSize: respSize,
  5376  		Payload:      payload,
  5377  	}
  5378  	for i := -1; i <= 10; i++ {
  5379  		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
  5380  		if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.DeadlineExceeded {
  5381  			t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want <nil>, error code: %s", err, codes.DeadlineExceeded)
  5382  		}
  5383  		cancel()
  5384  	}
  5385  }
  5386  
  5387  // Tests that the client doesn't send a negative timeout to the server. If the
  5388  // server receives a negative timeout, it would return an internal status. The
  5389  // client checks the context error before starting a stream, however the context
  5390  // may expire after this check and before the timeout is calculated.
  5391  func (s) TestNegativeRPCTimeout(t *testing.T) {
  5392  	server := stubserver.StartTestService(t, nil)
  5393  	defer server.Stop()
  5394  
  5395  	if err := server.StartClient(); err != nil {
  5396  		t.Fatalf("Failed to create client: %v", err)
  5397  	}
  5398  
  5399  	// Try increasingly larger timeout values to trigger the condition when the
  5400  	// context has expired while creating the grpc-timeout header.
  5401  	for i := range 10 {
  5402  		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i*100)*time.Nanosecond)
  5403  		defer cancel()
  5404  
  5405  		client := server.Client
  5406  		if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  5407  			t.Fatalf("TestService/EmptyCall(_, _) = _, %v; want <nil>, error code: %s", err, codes.DeadlineExceeded)
  5408  		}
  5409  	}
  5410  }
  5411  
  5412  func (s) TestDisabledIOBuffers(t *testing.T) {
  5413  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000))
  5414  	if err != nil {
  5415  		t.Fatalf("Failed to create payload: %v", err)
  5416  	}
  5417  	req := &testpb.StreamingOutputCallRequest{
  5418  		Payload: payload,
  5419  	}
  5420  	resp := &testpb.StreamingOutputCallResponse{
  5421  		Payload: payload,
  5422  	}
  5423  
  5424  	ss := &stubserver.StubServer{
  5425  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  5426  			for {
  5427  				in, err := stream.Recv()
  5428  				if err == io.EOF {
  5429  					return nil
  5430  				}
  5431  				if err != nil {
  5432  					t.Errorf("stream.Recv() = _, %v, want _, <nil>", err)
  5433  					return err
  5434  				}
  5435  				if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
  5436  					t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
  5437  					return err
  5438  				}
  5439  				if err := stream.Send(resp); err != nil {
  5440  					t.Errorf("stream.Send(_)= %v, want <nil>", err)
  5441  					return err
  5442  				}
  5443  
  5444  			}
  5445  		},
  5446  	}
  5447  
  5448  	s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0))
  5449  	testgrpc.RegisterTestServiceServer(s, ss)
  5450  
  5451  	lis, err := net.Listen("tcp", "localhost:0")
  5452  	if err != nil {
  5453  		t.Fatalf("Failed to create listener: %v", err)
  5454  	}
  5455  
  5456  	go func() {
  5457  		s.Serve(lis)
  5458  	}()
  5459  	defer s.Stop()
  5460  	cc, err := grpc.NewClient(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0))
  5461  	if err != nil {
  5462  		t.Fatalf("Failed to create a client for server")
  5463  	}
  5464  	defer cc.Close()
  5465  	c := testgrpc.NewTestServiceClient(cc)
  5466  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5467  	defer cancel()
  5468  	stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true))
  5469  	if err != nil {
  5470  		t.Fatalf("Failed to send test RPC to server")
  5471  	}
  5472  	for i := 0; i < 10; i++ {
  5473  		if err := stream.Send(req); err != nil {
  5474  			t.Fatalf("stream.Send(_) = %v, want <nil>", err)
  5475  		}
  5476  		in, err := stream.Recv()
  5477  		if err != nil {
  5478  			t.Fatalf("stream.Recv() = _, %v, want _, <nil>", err)
  5479  		}
  5480  		if !reflect.DeepEqual(in.Payload.Body, payload.Body) {
  5481  			t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body))
  5482  		}
  5483  	}
  5484  	stream.CloseSend()
  5485  	if _, err := stream.Recv(); err != io.EOF {
  5486  		t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err)
  5487  	}
  5488  }
  5489  
  5490  func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) {
  5491  	for _, e := range listTestEnv() {
  5492  		if e.httpHandler {
  5493  			continue
  5494  		}
  5495  		testServerMaxHeaderListSizeClientUserViolation(t, e)
  5496  	}
  5497  }
  5498  
  5499  func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) {
  5500  	te := newTest(t, e)
  5501  	te.maxServerHeaderListSize = new(uint32)
  5502  	*te.maxServerHeaderListSize = 216
  5503  	te.startServer(&testServer{security: e.security})
  5504  	defer te.tearDown()
  5505  
  5506  	cc := te.clientConn()
  5507  	tc := testgrpc.NewTestServiceClient(cc)
  5508  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5509  	defer cancel()
  5510  	metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216)))
  5511  	var err error
  5512  	if err = verifyResultWithDelay(func() (bool, error) {
  5513  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  5514  			return true, nil
  5515  		}
  5516  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  5517  	}); err != nil {
  5518  		t.Fatal(err)
  5519  	}
  5520  }
  5521  
  5522  func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) {
  5523  	for _, e := range listTestEnv() {
  5524  		if e.httpHandler {
  5525  			continue
  5526  		}
  5527  		testClientMaxHeaderListSizeServerUserViolation(t, e)
  5528  	}
  5529  }
  5530  
  5531  func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) {
  5532  	te := newTest(t, e)
  5533  	te.maxClientHeaderListSize = new(uint32)
  5534  	*te.maxClientHeaderListSize = 1 // any header server sends will violate
  5535  	te.startServer(&testServer{security: e.security})
  5536  	defer te.tearDown()
  5537  
  5538  	cc := te.clientConn()
  5539  	tc := testgrpc.NewTestServiceClient(cc)
  5540  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5541  	defer cancel()
  5542  	var err error
  5543  	if err = verifyResultWithDelay(func() (bool, error) {
  5544  		if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal {
  5545  			return true, nil
  5546  		}
  5547  		return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal)
  5548  	}); err != nil {
  5549  		t.Fatal(err)
  5550  	}
  5551  }
  5552  
  5553  func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) {
  5554  	for _, e := range listTestEnv() {
  5555  		if e.httpHandler || e.security == "tls" {
  5556  			continue
  5557  		}
  5558  		testServerMaxHeaderListSizeClientIntentionalViolation(t, e)
  5559  	}
  5560  }
  5561  
// testServerMaxHeaderListSizeClientIntentionalViolation writes a raw HTTP/2
// HEADERS frame on the client connection whose header block exceeds the
// server's advertised max header list size (512), and expects the server to
// fail the stream so that Recv surfaces an Internal status.
func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) {
	te := newTest(t, e)
	te.maxServerHeaderListSize = new(uint32)
	*te.maxServerHeaderListSize = 512
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	// clientConnWithConnControl exposes the raw connection so the test can
	// inject frames the grpc client would never produce itself.
	cc, dw := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
	}
	rcw := dw.getRawConnWrapper()
	// Build a 512-character header value so the encoded header block exceeds
	// the 512-byte limit configured above.
	val := make([]string, 512)
	for i := range val {
		val[i] = "a"
	}
	// allow for client to send the initial header
	time.Sleep(100 * time.Millisecond)
	// Inject the oversize header as a continuation of the already-open stream.
	rcw.writeHeaders(http2.HeadersFrameParam{
		StreamID:      tc.getCurrentStreamID(),
		BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")),
		EndStream:     false,
		EndHeaders:    true,
	})
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
	}
}
  5594  
  5595  func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) {
  5596  	for _, e := range listTestEnv() {
  5597  		if e.httpHandler || e.security == "tls" {
  5598  			continue
  5599  		}
  5600  		testClientMaxHeaderListSizeServerIntentionalViolation(t, e)
  5601  	}
  5602  }
  5603  
  5604  func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) {
  5605  	te := newTest(t, e)
  5606  	te.maxClientHeaderListSize = new(uint32)
  5607  	*te.maxClientHeaderListSize = 200
  5608  	lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true})
  5609  	defer te.tearDown()
  5610  	cc, _ := te.clientConnWithConnControl()
  5611  	tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)}
  5612  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5613  	defer cancel()
  5614  	stream, err := tc.FullDuplexCall(ctx)
  5615  	if err != nil {
  5616  		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, <nil>", tc, err)
  5617  	}
  5618  	var i int
  5619  	var rcw *rawConnWrapper
  5620  	for i = 0; i < 100; i++ {
  5621  		rcw = lw.getLastConn()
  5622  		if rcw != nil {
  5623  			break
  5624  		}
  5625  		time.Sleep(10 * time.Millisecond)
  5626  		continue
  5627  	}
  5628  	if i == 100 {
  5629  		t.Fatalf("failed to create server transport after 1s")
  5630  	}
  5631  
  5632  	val := make([]string, 200)
  5633  	for i := range val {
  5634  		val[i] = "a"
  5635  	}
  5636  	// allow for client to send the initial header.
  5637  	time.Sleep(100 * time.Millisecond)
  5638  	rcw.writeHeaders(http2.HeadersFrameParam{
  5639  		StreamID:      tc.getCurrentStreamID(),
  5640  		BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")),
  5641  		EndStream:     false,
  5642  		EndHeaders:    true,
  5643  	})
  5644  	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal {
  5645  		t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal)
  5646  	}
  5647  }
  5648  
  5649  func (s) TestNetPipeConn(t *testing.T) {
  5650  	// This test will block indefinitely if grpc writes both client and server
  5651  	// prefaces without either reading from the Conn.
  5652  	pl := testutils.NewPipeListener()
  5653  	s := grpc.NewServer()
  5654  	defer s.Stop()
  5655  	ts := &funcServer{unaryCall: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  5656  		return &testpb.SimpleResponse{}, nil
  5657  	}}
  5658  	testgrpc.RegisterTestServiceServer(s, ts)
  5659  	go s.Serve(pl)
  5660  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5661  	defer cancel()
  5662  	cc, err := grpc.NewClient("passthrough:///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDialer(pl.Dialer()))
  5663  	if err != nil {
  5664  		t.Fatalf("Error creating client: %v", err)
  5665  	}
  5666  	defer cc.Close()
  5667  	client := testgrpc.NewTestServiceClient(cc)
  5668  	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  5669  		t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err)
  5670  	}
  5671  }
  5672  
  5673  func (s) TestLargeTimeout(t *testing.T) {
  5674  	for _, e := range listTestEnv() {
  5675  		testLargeTimeout(t, e)
  5676  	}
  5677  }
  5678  
  5679  func testLargeTimeout(t *testing.T, e env) {
  5680  	te := newTest(t, e)
  5681  	te.declareLogNoise("Server.processUnaryRPC failed to write status")
  5682  
  5683  	ts := &funcServer{}
  5684  	te.startServer(ts)
  5685  	defer te.tearDown()
  5686  	tc := testgrpc.NewTestServiceClient(te.clientConn())
  5687  
  5688  	timeouts := []time.Duration{
  5689  		time.Duration(math.MaxInt64), // will be (correctly) converted to
  5690  		// 2562048 hours, which overflows upon converting back to an int64
  5691  		2562047 * time.Hour, // the largest timeout that does not overflow
  5692  	}
  5693  
  5694  	for i, maxTimeout := range timeouts {
  5695  		ts.unaryCall = func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  5696  			deadline, ok := ctx.Deadline()
  5697  			timeout := time.Until(deadline)
  5698  			minTimeout := maxTimeout - 5*time.Second
  5699  			if !ok || timeout < minTimeout || timeout > maxTimeout {
  5700  				t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout)
  5701  				return nil, status.Error(codes.OutOfRange, "deadline error")
  5702  			}
  5703  			return &testpb.SimpleResponse{}, nil
  5704  		}
  5705  
  5706  		ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
  5707  		defer cancel()
  5708  
  5709  		if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  5710  			t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, err)
  5711  		}
  5712  	}
  5713  }
  5714  
  5715  func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) {
  5716  	lis, err := net.Listen(network, address)
  5717  	if err != nil {
  5718  		return nil, err
  5719  	}
  5720  	return notifyingListener{connEstablished: event, Listener: lis}, nil
  5721  }
  5722  
  5723  type notifyingListener struct {
  5724  	connEstablished *grpcsync.Event
  5725  	net.Listener
  5726  }
  5727  
  5728  func (lis notifyingListener) Accept() (net.Conn, error) {
  5729  	defer lis.connEstablished.Fire()
  5730  	return lis.Listener.Accept()
  5731  }
  5732  
  5733  func (s) TestRPCWaitsForResolver(t *testing.T) {
  5734  	te := testServiceConfigSetup(t, tcpClearRREnv)
  5735  	te.startServer(&testServer{security: tcpClearRREnv.security})
  5736  	defer te.tearDown()
  5737  	r := manual.NewBuilderWithScheme("whatever")
  5738  
  5739  	te.resolverScheme = r.Scheme()
  5740  	cc := te.clientConn(grpc.WithResolvers(r))
  5741  	tc := testgrpc.NewTestServiceClient(cc)
  5742  
  5743  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
  5744  	defer cancel()
  5745  	// With no resolved addresses yet, this will timeout.
  5746  	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
  5747  		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded)
  5748  	}
  5749  
  5750  	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
  5751  	defer cancel()
  5752  	go func() {
  5753  		time.Sleep(time.Second)
  5754  		r.UpdateState(resolver.State{
  5755  			Addresses: []resolver.Address{{Addr: te.srvAddr}},
  5756  			ServiceConfig: parseServiceConfig(t, r, `{
  5757  		    "methodConfig": [
  5758  		        {
  5759  		            "name": [
  5760  		                {
  5761  		                    "service": "grpc.testing.TestService",
  5762  		                    "method": "UnaryCall"
  5763  		                }
  5764  		            ],
  5765                      "maxRequestMessageBytes": 0
  5766  		        }
  5767  		    ]
  5768  		}`)})
  5769  	}()
  5770  	// We wait a second before providing a service config and resolving
  5771  	// addresses.  So this will wait for that and then honor the
  5772  	// maxRequestMessageBytes it contains.
  5773  	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1)
  5774  	if err != nil {
  5775  		t.Fatal(err)
  5776  	}
  5777  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{Payload: payload}); status.Code(err) != codes.ResourceExhausted {
  5778  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
  5779  	}
  5780  	if got := ctx.Err(); got != nil {
  5781  		t.Fatalf("ctx.Err() = %v; want nil (deadline should be set short by service config)", got)
  5782  	}
  5783  	if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  5784  		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err)
  5785  	}
  5786  }
  5787  
// httpServerResponse describes a single canned HTTP/2 response: header
// frames, an optional DATA payload, and trailing header frames. Each header
// or trailer entry is a flat list of name/value pairs.
type httpServerResponse struct {
	headers  [][]string
	payload  []byte
	trailers [][]string
}

// httpServer is a minimal hand-rolled HTTP/2 server used to exercise the
// gRPC client against precisely controlled frame sequences.
type httpServer struct {
	// If waitForEndStream is set, wait for the client to send a frame with end
	// stream in it before sending a response/refused stream.
	waitForEndStream bool
	// refuseStream, if non-nil, is consulted with each new stream ID; when it
	// returns true the server resets that stream with REFUSED_STREAM instead
	// of responding.
	refuseStream     func(uint32) bool
	// responses are replayed in order, cycling back to the first once
	// exhausted.
	responses        []httpServerResponse
}
  5801  
  5802  func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error {
  5803  	if len(headerFields)%2 == 1 {
  5804  		panic("odd number of kv args")
  5805  	}
  5806  
  5807  	var buf bytes.Buffer
  5808  	henc := hpack.NewEncoder(&buf)
  5809  	for len(headerFields) > 0 {
  5810  		k, v := headerFields[0], headerFields[1]
  5811  		headerFields = headerFields[2:]
  5812  		henc.WriteField(hpack.HeaderField{Name: k, Value: v})
  5813  	}
  5814  
  5815  	return framer.WriteHeaders(http2.HeadersFrameParam{
  5816  		StreamID:      sid,
  5817  		BlockFragment: buf.Bytes(),
  5818  		EndStream:     endStream,
  5819  		EndHeaders:    true,
  5820  	})
  5821  }
  5822  
  5823  func (s *httpServer) writePayload(framer *http2.Framer, sid uint32, payload []byte) error {
  5824  	return framer.WriteData(sid, false, payload)
  5825  }
  5826  
// start launches a goroutine serving raw HTTP/2 frames over the first
// connection accepted from lis. After completing the server side of the
// connection handshake, it loops forever: it reads frames until a stream is
// ready to be answered (honoring waitForEndStream and refuseStream), then
// replays the next entry of s.responses, cycling when exhausted. Failures
// are reported via t.Errorf.
func (s *httpServer) start(t *testing.T, lis net.Listener) {
	// Launch an HTTP server to send back header.
	go func() {
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error accepting connection: %v", err)
			return
		}
		defer conn.Close()
		// Read preface sent by client.
		if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil {
			t.Errorf("Error at server-side while reading preface from client. Err: %v", err)
			return
		}
		reader := bufio.NewReader(conn)
		writer := bufio.NewWriter(conn)
		framer := http2.NewFramer(writer, reader)
		if err = framer.WriteSettingsAck(); err != nil {
			t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)
			return
		}
		writer.Flush() // necessary since client is expecting preface before declaring connection fully setup.
		var sid uint32
		// Loop until framer returns possible conn closed errors.
		for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) {
			// Read frames until a header is received.
			for {
				frame, err := framer.ReadFrame()
				if err != nil {
					if !isConnClosedErr(err) {
						t.Errorf("Error at server-side while reading frame. got: %q, want: rpc error containing substring %q OR %q", err, possibleConnResetMsg, possibleEOFMsg)
					}
					return
				}
				// sid remains 0 until a stream is ready to be responded to.
				sid = 0
				switch fr := frame.(type) {
				case *http2.HeadersFrame:
					// Respond after this if we are not waiting for an end
					// stream or if this frame ends it.
					if !s.waitForEndStream || fr.StreamEnded() {
						sid = fr.Header().StreamID
					}

				case *http2.DataFrame:
					// Respond after this if we were waiting for an end stream
					// and this frame ends it.  (If we were not waiting for an
					// end stream, this stream was already responded to when
					// the headers were received.)
					if s.waitForEndStream && fr.StreamEnded() {
						sid = fr.Header().StreamID
					}
				}
				if sid != 0 {
					if s.refuseStream == nil || !s.refuseStream(sid) {
						break
					}
					// refuseStream asked to reject this stream: reset it and
					// keep reading for the next one.
					framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream)
					writer.Flush()
				}
			}

			// Replay the canned response: header frames, an optional DATA
			// payload, then trailers (END_STREAM on the last trailer frame).
			response := s.responses[requestNum]
			for _, header := range response.headers {
				if err = s.writeHeader(framer, sid, header, false); err != nil {
					t.Errorf("Error at server-side while writing headers. Err: %v", err)
					return
				}
				writer.Flush()
			}
			if response.payload != nil {
				if err = s.writePayload(framer, sid, response.payload); err != nil {
					t.Errorf("Error at server-side while writing payload. Err: %v", err)
					return
				}
				writer.Flush()
			}
			for i, trailer := range response.trailers {
				if err = s.writeHeader(framer, sid, trailer, i == len(response.trailers)-1); err != nil {
					t.Errorf("Error at server-side while writing trailers. Err: %v", err)
					return
				}
				writer.Flush()
			}
		}
	}()
}
  5913  
  5914  func (s) TestClientCancellationPropagatesUnary(t *testing.T) {
  5915  	wg := &sync.WaitGroup{}
  5916  	called, done := make(chan struct{}), make(chan struct{})
  5917  	ss := &stubserver.StubServer{
  5918  		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
  5919  			close(called)
  5920  			<-ctx.Done()
  5921  			err := ctx.Err()
  5922  			if err != context.Canceled {
  5923  				t.Errorf("ctx.Err() = %v; want context.Canceled", err)
  5924  			}
  5925  			close(done)
  5926  			return nil, err
  5927  		},
  5928  	}
  5929  	if err := ss.Start(nil); err != nil {
  5930  		t.Fatalf("Error starting endpoint server: %v", err)
  5931  	}
  5932  	defer ss.Stop()
  5933  
  5934  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5935  
  5936  	wg.Add(1)
  5937  	go func() {
  5938  		if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled {
  5939  			t.Errorf("ss.Client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err)
  5940  		}
  5941  		wg.Done()
  5942  	}()
  5943  
  5944  	select {
  5945  	case <-called:
  5946  	case <-time.After(5 * time.Second):
  5947  		t.Fatalf("failed to perform EmptyCall after 10s")
  5948  	}
  5949  	cancel()
  5950  	select {
  5951  	case <-done:
  5952  	case <-time.After(5 * time.Second):
  5953  		t.Fatalf("server failed to close done chan due to cancellation propagation")
  5954  	}
  5955  	wg.Wait()
  5956  }
  5957  
  5958  // When an RPC is canceled, it's possible that the last Recv() returns before
  5959  // all call options' after are executed.
  5960  func (s) TestCanceledRPCCallOptionRace(t *testing.T) {
  5961  	ss := &stubserver.StubServer{
  5962  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  5963  			err := stream.Send(&testpb.StreamingOutputCallResponse{})
  5964  			if err != nil {
  5965  				return err
  5966  			}
  5967  			<-stream.Context().Done()
  5968  			return nil
  5969  		},
  5970  	}
  5971  	if err := ss.Start(nil); err != nil {
  5972  		t.Fatalf("Error starting endpoint server: %v", err)
  5973  	}
  5974  	defer ss.Stop()
  5975  
  5976  	const count = 1000
  5977  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  5978  	defer cancel()
  5979  
  5980  	var wg sync.WaitGroup
  5981  	wg.Add(count)
  5982  	for i := 0; i < count; i++ {
  5983  		go func() {
  5984  			defer wg.Done()
  5985  			var p peer.Peer
  5986  			ctx, cancel := context.WithCancel(ctx)
  5987  			defer cancel()
  5988  			stream, err := ss.Client.FullDuplexCall(ctx, grpc.Peer(&p))
  5989  			if err != nil {
  5990  				t.Errorf("_.FullDuplexCall(_) = _, %v", err)
  5991  				return
  5992  			}
  5993  			if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil {
  5994  				t.Errorf("_ has error %v while sending", err)
  5995  				return
  5996  			}
  5997  			if _, err := stream.Recv(); err != nil {
  5998  				t.Errorf("%v.Recv() = %v", stream, err)
  5999  				return
  6000  			}
  6001  			cancel()
  6002  			if _, err := stream.Recv(); status.Code(err) != codes.Canceled {
  6003  				t.Errorf("%v compleled with error %v, want %s", stream, err, codes.Canceled)
  6004  				return
  6005  			}
  6006  			// If recv returns before call options are executed, peer.Addr is not set,
  6007  			// fail the test.
  6008  			if p.Addr == nil {
  6009  				t.Errorf("peer.Addr is nil, want non-nil")
  6010  				return
  6011  			}
  6012  		}()
  6013  	}
  6014  	wg.Wait()
  6015  }
  6016  
func (s) TestClientSettingsFloodCloseConn(t *testing.T) {
	// Tests that the server properly closes its transport if the client floods
	// settings frames and then closes the connection.

	// Minimize buffer sizes to stimulate failure condition more quickly.
	s := grpc.NewServer(grpc.WriteBufferSize(20))
	l := bufconn.Listen(20)
	go s.Serve(l)

	// Dial our server and handshake.
	conn, err := l.Dial()
	if err != nil {
		t.Fatalf("Error dialing bufconn: %v", err)
	}

	n, err := conn.Write([]byte(http2.ClientPreface))
	if err != nil || n != len(http2.ClientPreface) {
		t.Fatalf("Error writing client preface: %v, %v", n, err)
	}

	// Complete the HTTP/2 handshake: read the server's SETTINGS and ack it.
	fr := http2.NewFramer(conn, conn)
	f, err := fr.ReadFrame()
	if err != nil {
		t.Fatalf("Error reading initial settings frame: %v", err)
	}
	if _, ok := f.(*http2.SettingsFrame); ok {
		if err := fr.WriteSettingsAck(); err != nil {
			t.Fatalf("Error writing settings ack: %v", err)
		}
	} else {
		t.Fatalf("Error reading initial settings frame: type=%T", f)
	}

	// Confirm settings can be written, and that an ack is read.
	if err = fr.WriteSettings(); err != nil {
		t.Fatalf("Error writing settings frame: %v", err)
	}
	if f, err = fr.ReadFrame(); err != nil {
		t.Fatalf("Error reading frame: %v", err)
	}
	if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() {
		t.Fatalf("Unexpected frame: %v", f)
	}

	// Flood settings frames until a timeout occurs, indicating the server has
	// stopped reading from the connection, then close the conn.
	for {
		conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))
		if err := fr.WriteSettings(); err != nil {
			// Only a write-deadline timeout is expected here; anything else
			// is a real failure.
			if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() {
				t.Fatalf("Received unexpected write error: %v", err)
			}
			break
		}
	}
	conn.Close()

	// If the server does not handle this situation correctly, it will never
	// close the transport.  This is because its loopyWriter.run() will have
	// exited, and thus not handle the goAway the draining process initiates.
	// Also, we would see a goroutine leak in this case, as the reader would be
	// blocked on the controlBuf's throttle() method indefinitely.

	// GracefulStop must return promptly; the timer force-stops the server (and
	// fails the test) if it hangs.
	timer := time.AfterFunc(5*time.Second, func() {
		t.Errorf("Timeout waiting for GracefulStop to return")
		s.Stop()
	})
	s.GracefulStop()
	timer.Stop()
}
  6087  
  6088  func unaryInterceptorVerifyConn(ctx context.Context, _ any, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (any, error) {
  6089  	conn := transport.GetConnection(ctx)
  6090  	if conn == nil {
  6091  		return nil, status.Error(codes.NotFound, "connection was not in context")
  6092  	}
  6093  	return nil, status.Error(codes.OK, "")
  6094  }
  6095  
  6096  // TestUnaryServerInterceptorGetsConnection tests whether the accepted conn on
  6097  // the server gets to any unary interceptors on the server side.
  6098  func (s) TestUnaryServerInterceptorGetsConnection(t *testing.T) {
  6099  	ss := &stubserver.StubServer{}
  6100  	if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyConn)}); err != nil {
  6101  		t.Fatalf("Error starting endpoint server: %v", err)
  6102  	}
  6103  	defer ss.Stop()
  6104  
  6105  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6106  	defer cancel()
  6107  
  6108  	if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK {
  6109  		t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK)
  6110  	}
  6111  }
  6112  
  6113  func streamingInterceptorVerifyConn(_ any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, _ grpc.StreamHandler) error {
  6114  	conn := transport.GetConnection(ss.Context())
  6115  	if conn == nil {
  6116  		return status.Error(codes.NotFound, "connection was not in context")
  6117  	}
  6118  	return status.Error(codes.OK, "")
  6119  }
  6120  
  6121  // TestStreamingServerInterceptorGetsConnection tests whether the accepted conn on
  6122  // the server gets to any streaming interceptors on the server side.
  6123  func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) {
  6124  	ss := &stubserver.StubServer{}
  6125  	if err := ss.Start([]grpc.ServerOption{grpc.StreamInterceptor(streamingInterceptorVerifyConn)}); err != nil {
  6126  		t.Fatalf("Error starting endpoint server: %v", err)
  6127  	}
  6128  	defer ss.Stop()
  6129  
  6130  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6131  	defer cancel()
  6132  
  6133  	s, err := ss.Client.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{})
  6134  	if err != nil {
  6135  		t.Fatalf("ss.Client.StreamingOutputCall(_) = _, %v, want _, <nil>", err)
  6136  	}
  6137  	if _, err := s.Recv(); err != io.EOF {
  6138  		t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF)
  6139  	}
  6140  }
  6141  
  6142  // unaryInterceptorVerifyAuthority verifies there is an unambiguous :authority
  6143  // once the request gets to an interceptor. An unambiguous :authority is defined
  6144  // as at most a single :authority header, and no host header according to A41.
  6145  func unaryInterceptorVerifyAuthority(ctx context.Context, _ any, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (any, error) {
  6146  	md, ok := metadata.FromIncomingContext(ctx)
  6147  	if !ok {
  6148  		return nil, status.Error(codes.NotFound, "metadata was not in context")
  6149  	}
  6150  	authority := md.Get(":authority")
  6151  	if len(authority) > 1 { // Should be an unambiguous authority by the time it gets to interceptor.
  6152  		return nil, status.Error(codes.NotFound, ":authority value had more than one value")
  6153  	}
  6154  	// Host header shouldn't be present by the time it gets to the interceptor
  6155  	// level (should either be renamed to :authority or explicitly deleted).
  6156  	host := md.Get("host")
  6157  	if len(host) != 0 {
  6158  		return nil, status.Error(codes.NotFound, "host header should not be present in metadata")
  6159  	}
  6160  	// Pass back the authority for verification on client - NotFound so
  6161  	// grpc-message will be available to read for verification.
  6162  	if len(authority) == 0 {
  6163  		// Represent no :authority header present with an empty string.
  6164  		return nil, status.Error(codes.NotFound, "")
  6165  	}
  6166  	return nil, status.Error(codes.NotFound, authority[0])
  6167  }
  6168  
// TestAuthorityHeader tests that the eventual :authority that reaches the grpc
// layer is unambiguous due to logic added in A41.
func (s) TestAuthorityHeader(t *testing.T) {
	tests := []struct {
		name          string
		headers       []string
		wantAuthority string
	}{
		// "If :authority is missing, Host must be renamed to :authority." - A41
		{
			name: "Missing :authority",
			// Codepath triggered by incoming headers with no authority but with
			// a host.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				"content-type", "application/grpc",
				"te", "trailers",
				"host", "localhost",
			},
			wantAuthority: "localhost",
		},
		{
			name: "Missing :authority and host",
			// Codepath triggered by incoming headers with no :authority and no
			// host.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				"content-type", "application/grpc",
				"te", "trailers",
			},
			wantAuthority: "",
		},
		// "If :authority is present, Host must be discarded." - A41
		{
			name: ":authority and host present",
			// Codepath triggered by incoming headers with both an authority
			// header and a host header.
			headers: []string{
				":method", "POST",
				":path", "/grpc.testing.TestService/UnaryCall",
				":authority", "localhost",
				"content-type", "application/grpc",
				"host", "localhost2",
			},
			wantAuthority: "localhost",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			te := newTest(t, tcpClearRREnv)
			ts := &funcServer{unaryCall: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
				return &testpb.SimpleResponse{}, nil
			}}
			// The interceptor echoes the observed :authority back via the
			// grpc-message trailer of a NotFound status.
			te.unaryServerInt = unaryInterceptorVerifyAuthority
			te.startServer(ts)
			defer te.tearDown()
			success := testutils.NewChannel()
			// Drive the server with raw HTTP/2 frames so the exact header set
			// from the test case is what reaches the transport.
			te.withServerTester(func(st *serverTester) {
				st.writeHeaders(http2.HeadersFrameParam{
					StreamID:      1,
					BlockFragment: st.encodeHeader(test.headers...),
					EndStream:     false,
					EndHeaders:    true,
				})
				// Empty gRPC message body (5-byte length-prefixed framing).
				st.writeData(1, true, []byte{0, 0, 0, 0, 0})

				// Scan response frames for the grpc-message header carrying
				// the echoed authority.
				for {
					frame := st.wantAnyFrame()
					f, ok := frame.(*http2.MetaHeadersFrame)
					if !ok {
						continue
					}
					for _, header := range f.Fields {
						if header.Name == "grpc-message" {
							success.Send(header.Value)
							return
						}
					}
				}
			})

			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			gotAuthority, err := success.Receive(ctx)
			if err != nil {
				t.Fatalf("Error receiving from channel: %v", err)
			}
			if gotAuthority != test.wantAuthority {
				t.Fatalf("gotAuthority: %v, wantAuthority %v", gotAuthority, test.wantAuthority)
			}
		})
	}
}
  6264  
  6265  // wrapCloseListener tracks Accepts/Closes and maintains a counter of the
  6266  // number of open connections.
  6267  type wrapCloseListener struct {
  6268  	net.Listener
  6269  	connsOpen int32
  6270  }
  6271  
  6272  // wrapCloseListener is returned by wrapCloseListener.Accept and decrements its
  6273  // connsOpen when Close is called.
  6274  type wrapCloseConn struct {
  6275  	net.Conn
  6276  	lis       *wrapCloseListener
  6277  	closeOnce sync.Once
  6278  }
  6279  
  6280  func (w *wrapCloseListener) Accept() (net.Conn, error) {
  6281  	conn, err := w.Listener.Accept()
  6282  	if err != nil {
  6283  		return nil, err
  6284  	}
  6285  	atomic.AddInt32(&w.connsOpen, 1)
  6286  	return &wrapCloseConn{Conn: conn, lis: w}, nil
  6287  }
  6288  
  6289  func (w *wrapCloseConn) Close() error {
  6290  	defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) })
  6291  	return w.Conn.Close()
  6292  }
  6293  
  6294  // TestServerClosesConn ensures conn.Close is always closed even if the client
  6295  // doesn't complete the HTTP/2 handshake.
  6296  func (s) TestServerClosesConn(t *testing.T) {
  6297  	lis := bufconn.Listen(20)
  6298  	wrapLis := &wrapCloseListener{Listener: lis}
  6299  
  6300  	s := grpc.NewServer()
  6301  	go s.Serve(wrapLis)
  6302  	defer s.Stop()
  6303  
  6304  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6305  	defer cancel()
  6306  
  6307  	for i := 0; i < 10; i++ {
  6308  		conn, err := lis.DialContext(ctx)
  6309  		if err != nil {
  6310  			t.Fatalf("Dial = _, %v; want _, nil", err)
  6311  		}
  6312  		conn.Close()
  6313  	}
  6314  	for ctx.Err() == nil {
  6315  		if atomic.LoadInt32(&wrapLis.connsOpen) == 0 {
  6316  			return
  6317  		}
  6318  		time.Sleep(50 * time.Millisecond)
  6319  	}
  6320  	t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen))
  6321  }
  6322  
  6323  // TestNilStatsHandler ensures we do not panic as a result of a nil stats
  6324  // handler.
  6325  func (s) TestNilStatsHandler(t *testing.T) {
  6326  	grpctest.TLogger.ExpectErrorN("ignoring nil parameter", 2)
  6327  	ss := &stubserver.StubServer{
  6328  		UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6329  			return &testpb.SimpleResponse{}, nil
  6330  		},
  6331  	}
  6332  	if err := ss.Start([]grpc.ServerOption{grpc.StatsHandler(nil)}, grpc.WithStatsHandler(nil)); err != nil {
  6333  		t.Fatalf("Error starting endpoint server: %v", err)
  6334  	}
  6335  	defer ss.Stop()
  6336  
  6337  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6338  	defer cancel()
  6339  	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6340  		t.Fatalf("Unexpected error from UnaryCall: %v", err)
  6341  	}
  6342  }
  6343  
  6344  // TestUnexpectedEOF tests a scenario where a client invokes two unary RPC
  6345  // calls. The first call receives a payload which exceeds max grpc receive
  6346  // message length, and the second gets a large response. This second RPC should
  6347  // not fail with unexpected.EOF.
  6348  func (s) TestUnexpectedEOF(t *testing.T) {
  6349  	ss := &stubserver.StubServer{
  6350  		UnaryCallF: func(_ context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6351  			return &testpb.SimpleResponse{
  6352  				Payload: &testpb.Payload{
  6353  					Body: bytes.Repeat([]byte("a"), int(in.ResponseSize)),
  6354  				},
  6355  			}, nil
  6356  		},
  6357  	}
  6358  	if err := ss.Start([]grpc.ServerOption{}); err != nil {
  6359  		t.Fatalf("Error starting endpoint server: %v", err)
  6360  	}
  6361  	defer ss.Stop()
  6362  
  6363  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6364  	defer cancel()
  6365  	for i := 0; i < 10; i++ {
  6366  		// exceeds grpc.DefaultMaxRecvMessageSize, this should error with
  6367  		// RESOURCE_EXHAUSTED error.
  6368  		_, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 4194304})
  6369  		if code := status.Code(err); code != codes.ResourceExhausted {
  6370  			t.Fatalf("UnaryCall RPC returned error: %v, want status code %v", err, codes.ResourceExhausted)
  6371  		}
  6372  		// Larger response that doesn't exceed DefaultMaxRecvMessageSize, this
  6373  		// should work normally.
  6374  		if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 275075}); err != nil {
  6375  			t.Fatalf("UnaryCall RPC failed: %v", err)
  6376  		}
  6377  	}
  6378  }
  6379  
  6380  // TestRecvWhileReturningStatus performs a Recv in a service handler while the
  6381  // handler returns its status.  A race condition could result in the server
  6382  // sending the first headers frame without the HTTP :status header.  This can
  6383  // happen when the failed Recv (due to the handler returning) and the handler's
  6384  // status both attempt to write the status, which would be the first headers
  6385  // frame sent, simultaneously.
  6386  func (s) TestRecvWhileReturningStatus(t *testing.T) {
  6387  	ss := &stubserver.StubServer{
  6388  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  6389  			// The client never sends, so this Recv blocks until the server
  6390  			// returns and causes stream operations to return errors.
  6391  			go stream.Recv()
  6392  			return nil
  6393  		},
  6394  	}
  6395  	if err := ss.Start(nil); err != nil {
  6396  		t.Fatalf("Error starting endpoint server: %v", err)
  6397  	}
  6398  	defer ss.Stop()
  6399  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6400  	defer cancel()
  6401  	for i := 0; i < 100; i++ {
  6402  		stream, err := ss.Client.FullDuplexCall(ctx)
  6403  		if err != nil {
  6404  			t.Fatalf("Error while creating stream: %v", err)
  6405  		}
  6406  		if _, err := stream.Recv(); err != io.EOF {
  6407  			t.Fatalf("stream.Recv() = %v, want io.EOF", err)
  6408  		}
  6409  	}
  6410  }
  6411  
  6412  type mockBinaryLogger struct {
  6413  	mml *mockMethodLogger
  6414  }
  6415  
  6416  func newMockBinaryLogger() *mockBinaryLogger {
  6417  	return &mockBinaryLogger{
  6418  		mml: &mockMethodLogger{},
  6419  	}
  6420  }
  6421  
  6422  func (mbl *mockBinaryLogger) GetMethodLogger(string) binarylog.MethodLogger {
  6423  	return mbl.mml
  6424  }
  6425  
  6426  type mockMethodLogger struct {
  6427  	events uint64
  6428  }
  6429  
  6430  func (mml *mockMethodLogger) Log(context.Context, binarylog.LogEntryConfig) {
  6431  	atomic.AddUint64(&mml.events, 1)
  6432  }
  6433  
  6434  // TestGlobalBinaryLoggingOptions tests the binary logging options for client
  6435  // and server side. The test configures a binary logger to be plumbed into every
  6436  // created ClientConn and server. It then makes a unary RPC call, and a
  6437  // streaming RPC call. A certain amount of logging calls should happen as a
  6438  // result of the stream operations on each of these calls.
  6439  func (s) TestGlobalBinaryLoggingOptions(t *testing.T) {
  6440  	csbl := newMockBinaryLogger()
  6441  	ssbl := newMockBinaryLogger()
  6442  
  6443  	internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(csbl))
  6444  	internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(ssbl))
  6445  	defer func() {
  6446  		internal.ClearGlobalDialOptions()
  6447  		internal.ClearGlobalServerOptions()
  6448  	}()
  6449  	ss := &stubserver.StubServer{
  6450  		UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6451  			return &testpb.SimpleResponse{}, nil
  6452  		},
  6453  		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
  6454  			_, err := stream.Recv()
  6455  			if err == io.EOF {
  6456  				return nil
  6457  			}
  6458  			return status.Errorf(codes.Unknown, "expected client to call CloseSend")
  6459  		},
  6460  	}
  6461  
  6462  	// No client or server options specified, because should pick up configured
  6463  	// global options.
  6464  	if err := ss.Start(nil); err != nil {
  6465  		t.Fatalf("Error starting endpoint server: %v", err)
  6466  	}
  6467  	defer ss.Stop()
  6468  
  6469  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6470  	defer cancel()
  6471  	// Make a Unary RPC. This should cause Log calls on the MethodLogger.
  6472  	if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6473  		t.Fatalf("Unexpected error from UnaryCall: %v", err)
  6474  	}
  6475  	if csbl.mml.events != 5 {
  6476  		t.Fatalf("want 5 client side binary logging events, got %v", csbl.mml.events)
  6477  	}
  6478  	if ssbl.mml.events != 5 {
  6479  		t.Fatalf("want 5 server side binary logging events, got %v", ssbl.mml.events)
  6480  	}
  6481  
  6482  	// Make a streaming RPC. This should cause Log calls on the MethodLogger.
  6483  	stream, err := ss.Client.FullDuplexCall(ctx)
  6484  	if err != nil {
  6485  		t.Fatalf("ss.Client.FullDuplexCall failed: %f", err)
  6486  	}
  6487  
  6488  	stream.CloseSend()
  6489  	if _, err = stream.Recv(); err != io.EOF {
  6490  		t.Fatalf("unexpected error: %v, expected an EOF error", err)
  6491  	}
  6492  
  6493  	if csbl.mml.events != 8 {
  6494  		t.Fatalf("want 8 client side binary logging events, got %v", csbl.mml.events)
  6495  	}
  6496  	if ssbl.mml.events != 8 {
  6497  		t.Fatalf("want 8 server side binary logging events, got %v", ssbl.mml.events)
  6498  	}
  6499  }
  6500  
  6501  type statsHandlerRecordEvents struct {
  6502  	mu sync.Mutex
  6503  	s  []stats.RPCStats
  6504  }
  6505  
  6506  func (*statsHandlerRecordEvents) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
  6507  	return ctx
  6508  }
  6509  func (h *statsHandlerRecordEvents) HandleRPC(_ context.Context, s stats.RPCStats) {
  6510  	h.mu.Lock()
  6511  	defer h.mu.Unlock()
  6512  	h.s = append(h.s, s)
  6513  }
  6514  func (*statsHandlerRecordEvents) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
  6515  	return ctx
  6516  }
  6517  func (*statsHandlerRecordEvents) HandleConn(context.Context, stats.ConnStats) {}
  6518  
  6519  type triggerRPCBlockPicker struct {
  6520  	pickDone func()
  6521  }
  6522  
  6523  func (bp *triggerRPCBlockPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
  6524  	bp.pickDone()
  6525  	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
  6526  }
  6527  
// name is the balancer name under which triggerRPCBlockBalancer is registered
// and referenced from service config JSON in tests.
const name = "triggerRPCBlockBalancer"

// triggerRPCBlockPickerBalancerBuilder builds triggerRPCBlockBalancer
// instances. It is stateless, so the zero value is usable.
type triggerRPCBlockPickerBalancerBuilder struct{}
  6531  
  6532  func (triggerRPCBlockPickerBalancerBuilder) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
  6533  	b := &triggerRPCBlockBalancer{
  6534  		blockingPickerDone: grpcsync.NewEvent(),
  6535  		ClientConn:         cc,
  6536  	}
  6537  	// round_robin child to complete balancer tree with a usable leaf policy and
  6538  	// have RPCs actually work.
  6539  	builder := balancer.Get(roundrobin.Name)
  6540  	rr := builder.Build(b, bOpts)
  6541  	if rr == nil {
  6542  		panic("round robin builder returned nil")
  6543  	}
  6544  	b.Balancer = rr
  6545  	return b
  6546  }
  6547  
  6548  func (triggerRPCBlockPickerBalancerBuilder) ParseConfig(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
  6549  	return &bpbConfig{}, nil
  6550  }
  6551  
  6552  func (triggerRPCBlockPickerBalancerBuilder) Name() string {
  6553  	return name
  6554  }
  6555  
// bpbConfig is the (empty) load-balancing config for triggerRPCBlockBalancer;
// it only embeds the marker interface required by ParseConfig.
type bpbConfig struct {
	serviceconfig.LoadBalancingConfig
}
  6559  
// triggerRPCBlockBalancer uses a child RR balancer, but blocks all UpdateState
// calls until the first Pick call. That first Pick returns
// ErrNoSubConnAvailable to make the RPC block and trigger the appropriate stats
// handler callout. After the first Pick call, it will forward at least one
// READY picker update from the child, causing RPCs to proceed as normal using a
// round robin balancer's picker if it updates with a READY picker.
type triggerRPCBlockBalancer struct {
	stateMu    sync.Mutex     // guards childState and ordering of UpdateState forwarding
	childState balancer.State // most recent state reported by the RR child

	// blockingPickerDone fires once the blocking picker's Pick has run,
	// unblocking forwarding of child READY updates.
	blockingPickerDone *grpcsync.Event
	// embed a ClientConn to wrap only UpdateState() operation
	balancer.ClientConn
	// embed a Balancer to wrap only UpdateClientConnState() operation
	balancer.Balancer
}
  6576  
  6577  func (bpb *triggerRPCBlockBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
  6578  	err := bpb.Balancer.UpdateClientConnState(s)
  6579  	bpb.ClientConn.UpdateState(balancer.State{
  6580  		ConnectivityState: connectivity.Connecting,
  6581  		Picker: &triggerRPCBlockPicker{
  6582  			pickDone: func() {
  6583  				bpb.stateMu.Lock()
  6584  				defer bpb.stateMu.Unlock()
  6585  				bpb.blockingPickerDone.Fire()
  6586  				if bpb.childState.ConnectivityState == connectivity.Ready {
  6587  					bpb.ClientConn.UpdateState(bpb.childState)
  6588  				}
  6589  			},
  6590  		},
  6591  	})
  6592  	return err
  6593  }
  6594  
  6595  func (bpb *triggerRPCBlockBalancer) UpdateState(state balancer.State) {
  6596  	bpb.stateMu.Lock()
  6597  	defer bpb.stateMu.Unlock()
  6598  	bpb.childState = state
  6599  	if bpb.blockingPickerDone.HasFired() { // guard first one to get a picker sending ErrNoSubConnAvailable first
  6600  		if state.ConnectivityState == connectivity.Ready {
  6601  			bpb.ClientConn.UpdateState(state) // after the first rr picker update, only forward once READY for deterministic picker counts
  6602  		}
  6603  	}
  6604  }
  6605  
  6606  // TestRPCBlockingOnPickerStatsCall tests the emission of a stats handler call
  6607  // that represents the RPC had to block waiting for a new picker due to
  6608  // ErrNoSubConnAvailable being returned from the first picker call.
  6609  func (s) TestRPCBlockingOnPickerStatsCall(t *testing.T) {
  6610  	sh := &statsHandlerRecordEvents{}
  6611  	ss := &stubserver.StubServer{
  6612  		UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
  6613  			return &testpb.SimpleResponse{}, nil
  6614  		},
  6615  	}
  6616  
  6617  	if err := ss.StartServer(); err != nil {
  6618  		t.Fatalf("Error starting endpoint server: %v", err)
  6619  	}
  6620  	defer ss.Stop()
  6621  
  6622  	lbCfgJSON := `{
  6623    		"loadBalancingConfig": [
  6624      		{
  6625        			"triggerRPCBlockBalancer": {}
  6626      		}
  6627  		]
  6628  	}`
  6629  
  6630  	sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(lbCfgJSON)
  6631  	mr := manual.NewBuilderWithScheme("pickerupdatedbalancer")
  6632  	defer mr.Close()
  6633  	mr.InitialState(resolver.State{
  6634  		Addresses: []resolver.Address{
  6635  			{Addr: ss.Address},
  6636  		},
  6637  		ServiceConfig: sc,
  6638  	})
  6639  
  6640  	cc, err := grpc.NewClient(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithStatsHandler(sh), grpc.WithTransportCredentials(insecure.NewCredentials()))
  6641  	if err != nil {
  6642  		t.Fatalf("grpc.NewClient() failed: %v", err)
  6643  	}
  6644  	defer cc.Close()
  6645  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  6646  	defer cancel()
  6647  	testServiceClient := testgrpc.NewTestServiceClient(cc)
  6648  	if _, err := testServiceClient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
  6649  		t.Fatalf("Unexpected error from UnaryCall: %v", err)
  6650  	}
  6651  
  6652  	var pickerUpdatedCount uint
  6653  	for _, stat := range sh.s {
  6654  		if _, ok := stat.(*stats.PickerUpdated); ok {
  6655  			pickerUpdatedCount++
  6656  		}
  6657  	}
  6658  	if pickerUpdatedCount != 1 {
  6659  		t.Fatalf("sh.pickerUpdated count: %v, want: %v", pickerUpdatedCount, 2)
  6660  	}
  6661  }