google.golang.org/grpc@v1.62.1/internal/transport/transport_test.go

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package transport
    20  
    21  import (
    22  	"bytes"
    23  	"context"
    24  	"encoding/binary"
    25  	"errors"
    26  	"fmt"
    27  	"io"
    28  	"math"
    29  	"net"
    30  	"os"
    31  	"runtime"
    32  	"strconv"
    33  	"strings"
    34  	"sync"
    35  	"testing"
    36  	"time"
    37  
    38  	"github.com/google/go-cmp/cmp"
    39  	"golang.org/x/net/http2"
    40  	"golang.org/x/net/http2/hpack"
    41  	"google.golang.org/grpc/attributes"
    42  	"google.golang.org/grpc/codes"
    43  	"google.golang.org/grpc/credentials"
    44  	"google.golang.org/grpc/internal/channelz"
    45  	"google.golang.org/grpc/internal/grpctest"
    46  	"google.golang.org/grpc/internal/leakcheck"
    47  	"google.golang.org/grpc/internal/testutils"
    48  	"google.golang.org/grpc/metadata"
    49  	"google.golang.org/grpc/resolver"
    50  	"google.golang.org/grpc/status"
    51  )
    52  
    53  type s struct {
    54  	grpctest.Tester
    55  }
    56  
    57  func Test(t *testing.T) {
    58  	grpctest.RunSubTests(t, s{})
    59  }
    60  
    61  var (
    62  	expectedRequest            = []byte("ping")
    63  	expectedResponse           = []byte("pong")
    64  	expectedRequestLarge       = make([]byte, initialWindowSize*2)
    65  	expectedResponseLarge      = make([]byte, initialWindowSize*2)
    66  	expectedInvalidHeaderField = "invalid/content-type"
    67  )
    68  
    69  func init() {
    70  	expectedRequestLarge[0] = 'g'
    71  	expectedRequestLarge[len(expectedRequestLarge)-1] = 'r'
    72  	expectedResponseLarge[0] = 'p'
    73  	expectedResponseLarge[len(expectedResponseLarge)-1] = 'c'
    74  }
    75  
    76  type testStreamHandler struct {
    77  	t           *http2Server
    78  	notify      chan struct{}
    79  	getNotified chan struct{}
    80  }
    81  
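        // hType selects the stream handler the test server installs for each
        // incoming stream.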
    82  type hType int
    83  
    84  const (
    85  	normal hType = iota
    86  	suspended
    87  	notifyCall
    88  	misbehaved
    89  	encodingRequiredStatus
    90  	invalidHeaderField
    91  	delayRead
    92  	pingpong
    93  )
    94  
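        // handleStreamAndNotify closes h.notify, if set, to signal that a stream
        // reached the server; the select guards against a double close.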
    95  func (h *testStreamHandler) handleStreamAndNotify(s *Stream) {
    96  	if h.notify == nil {
    97  		return
    98  	}
    99  	go func() {
   100  		select {
   101  		case <-h.notify:
   102  		default:
   103  			close(h.notify)
   104  		}
   105  	}()
   106  }
   107  
   108  func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) {
   109  	req := expectedRequest
   110  	resp := expectedResponse
   111  	if s.Method() == "foo.Large" {
   112  		req = expectedRequestLarge
   113  		resp = expectedResponseLarge
   114  	}
   115  	p := make([]byte, len(req))
   116  	_, err := s.Read(p)
   117  	if err != nil {
   118  		return
   119  	}
   120  	if !bytes.Equal(p, req) {
   121  		t.Errorf("handleStream got %v, want %v", p, req)
   122  		h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
   123  		return
   124  	}
   125  	// send a response back to the client.
   126  	h.t.Write(s, nil, resp, &Options{})
   127  	// send the trailer to end the stream.
   128  	h.t.WriteStatus(s, status.New(codes.OK, ""))
   129  }
   130  
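        // handleStreamPingPong echoes every message received on the stream back to
        // the client, and replies with an OK status once it reads io.EOF.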
   131  func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) {
   132  	header := make([]byte, 5)
   133  	for {
   134  		if _, err := s.Read(header); err != nil {
   135  			if err == io.EOF {
   136  				h.t.WriteStatus(s, status.New(codes.OK, ""))
   137  				return
   138  			}
   139  			t.Errorf("Error on server while reading data header: %v", err)
   140  			h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
   141  			return
   142  		}
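        		// The 5-byte header is the gRPC message frame: a 1-byte compression
        		// flag followed by a 4-byte big-endian message length.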
   143  		sz := binary.BigEndian.Uint32(header[1:])
   144  		msg := make([]byte, int(sz))
   145  		if _, err := s.Read(msg); err != nil {
   146  			t.Errorf("Error on server while reading message: %v", err)
   147  			h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
   148  			return
   149  		}
   150  		buf := make([]byte, sz+5)
   151  		buf[0] = byte(0)
   152  		binary.BigEndian.PutUint32(buf[1:], uint32(sz))
   153  		copy(buf[5:], msg)
   154  		h.t.Write(s, nil, buf, &Options{})
   155  	}
   156  }
   157  
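        // handleStreamMisbehave floods the client with about initialWindowSize bytes
        // of data, bypassing flow-control bookkeeping by writing dataFrames directly
        // to controlBuf. Depending on the method name it violates either only the
        // connection-level window or the stream-level window of the client.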
   158  func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
   159  	conn, ok := s.st.(*http2Server)
   160  	if !ok {
   161  		t.Errorf("Failed to convert %v to *http2Server", s.st)
   162  		h.t.WriteStatus(s, status.New(codes.Internal, ""))
   163  		return
   164  	}
   165  	var sent int
   166  	p := make([]byte, http2MaxFrameLen)
   167  	for sent < initialWindowSize {
   168  		n := initialWindowSize - sent
   169  		// The last message may be smaller than http2MaxFrameLen.
   170  		if n <= http2MaxFrameLen {
   171  			if s.Method() == "foo.Connection" {
   172  				// Violate connection level flow control window of client but do not
   173  				// violate any stream level windows.
   174  				p = make([]byte, n)
   175  			} else {
   176  				// Violate stream level flow control window of client.
   177  				p = make([]byte, n+1)
   178  			}
   179  		}
   180  		conn.controlBuf.put(&dataFrame{
   181  			streamID:    s.id,
   182  			h:           nil,
   183  			d:           p,
   184  			onEachWrite: func() {},
   185  		})
   186  		sent += len(p)
   187  	}
   188  }
   189  
   190  func (h *testStreamHandler) handleStreamEncodingRequiredStatus(s *Stream) {
   191  	// A raw newline is not accepted by the HTTP/2 framer, so it must be encoded.
   192  	h.t.WriteStatus(s, encodingTestStatus)
   193  }
   194  
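        // handleStreamInvalidHeaderField responds with a content-type the client
        // must reject, and leaves the stream open.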
   195  func (h *testStreamHandler) handleStreamInvalidHeaderField(s *Stream) {
   196  	headerFields := []hpack.HeaderField{}
   197  	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField})
   198  	h.t.controlBuf.put(&headerFrame{
   199  		streamID:  s.id,
   200  		hf:        headerFields,
   201  		endStream: false,
   202  	})
   203  }
   204  
   205  // handleStreamDelayRead delays reads so that the other side has to halt on
   206  // stream-level flow control.
   207  // This handler assumes dynamic flow control is turned off and that window
   208  // sizes are set to defaultWindowSize.
   209  func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
   210  	req := expectedRequest
   211  	resp := expectedResponse
   212  	if s.Method() == "foo.Large" {
   213  		req = expectedRequestLarge
   214  		resp = expectedResponseLarge
   215  	}
   216  	var (
   217  		mu    sync.Mutex
   218  		total int
   219  	)
   220  	s.wq.replenish = func(n int) {
   221  		mu.Lock()
   222  		total += n
   223  		mu.Unlock()
   224  		s.wq.realReplenish(n)
   225  	}
   226  	getTotal := func() int {
   227  		mu.Lock()
   228  		defer mu.Unlock()
   229  		return total
   230  	}
   231  	done := make(chan struct{})
   232  	defer close(done)
   233  	go func() {
   234  		for {
   235  			select {
   236  			// Prevent goroutine from leaking.
   237  			case <-done:
   238  				return
   239  			default:
   240  			}
   241  			if getTotal() == defaultWindowSize {
   242  				// Signal the client to start reading and
   243  				// thereby send window update.
   244  				close(h.notify)
   245  				return
   246  			}
   247  			runtime.Gosched()
   248  		}
   249  	}()
   250  	p := make([]byte, len(req))
   251  
   252  	// Let the other side run out of stream-level window before
   253  	// starting to read and thereby sending a window update.
   254  	timer := time.NewTimer(time.Second * 10)
   255  	select {
   256  	case <-h.getNotified:
   257  		timer.Stop()
   258  	case <-timer.C:
   259  		t.Errorf("Server timed out.")
   260  		return
   261  	}
   262  	_, err := s.Read(p)
   263  	if err != nil {
   264  		t.Errorf("s.Read(_) = _, %v, want _, <nil>", err)
   265  		return
   266  	}
   267  
   268  	if !bytes.Equal(p, req) {
   269  		t.Errorf("handleStream got %v, want %v", p, req)
   270  		return
   271  	}
   272  	// This write will cause the server to run out of stream-level
   273  	// flow control; the other side won't send a window update
   274  	// until that happens.
   275  	if err := h.t.Write(s, nil, resp, &Options{}); err != nil {
   276  		t.Errorf("server Write got %v, want <nil>", err)
   277  		return
   278  	}
   279  	// Read one more time to ensure that everything remains fine and
   280  	// that the goroutine we launched earlier to signal the client
   281  	// to read gets enough time to run.
   282  	_, err = s.Read(p)
   283  	if err != nil {
   284  		t.Errorf("s.Read(_) = _, %v, want _, nil", err)
   285  		return
   286  	}
   287  	// send the trailer to end the stream.
   288  	if err := h.t.WriteStatus(s, status.New(codes.OK, "")); err != nil {
   289  		t.Errorf("server WriteStatus got %v, want <nil>", err)
   290  		return
   291  	}
   292  }
   293  
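        // server is a test server that accepts connections and serves each one with
        // the stream handler selected by an hType.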
   294  type server struct {
   295  	lis        net.Listener
   296  	port       string
   297  	startedErr chan error // signals server startup; nil on success
   298  	mu         sync.Mutex
   299  	conns      map[ServerTransport]net.Conn
   300  	h          *testStreamHandler
   301  	ready      chan struct{}
   302  	channelzID *channelz.Identifier
   303  }
   304  
   305  func newTestServer() *server {
   306  	return &server{
   307  		startedErr: make(chan error, 1),
   308  		ready:      make(chan struct{}),
   309  		channelzID: channelz.NewIdentifierForTesting(channelz.RefServer, time.Now().Unix(), nil),
   310  	}
   311  }
   312  
   313  // start starts the server. Other goroutines should block on s.startedErr (see wait) before further operations.
   314  func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) {
   315  	var err error
   316  	if port == 0 {
   317  		s.lis, err = net.Listen("tcp", "localhost:0")
   318  	} else {
   319  		s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port))
   320  	}
   321  	if err != nil {
   322  		s.startedErr <- fmt.Errorf("failed to listen: %v", err)
   323  		return
   324  	}
   325  	_, p, err := net.SplitHostPort(s.lis.Addr().String())
   326  	if err != nil {
   327  		s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err)
   328  		return
   329  	}
   330  	s.port = p
   331  	s.conns = make(map[ServerTransport]net.Conn)
   332  	s.startedErr <- nil
   333  	for {
   334  		conn, err := s.lis.Accept()
   335  		if err != nil {
   336  			return
   337  		}
   338  		rawConn := conn
   339  		if serverConfig.MaxStreams == 0 {
   340  			serverConfig.MaxStreams = math.MaxUint32
   341  		}
   342  		transport, err := NewServerTransport(conn, serverConfig)
   343  		if err != nil {
   344  			return
   345  		}
   346  		s.mu.Lock()
   347  		if s.conns == nil {
   348  			s.mu.Unlock()
   349  			transport.Close(errors.New("s.conns is nil"))
   350  			return
   351  		}
   352  		s.conns[transport] = rawConn
   353  		h := &testStreamHandler{t: transport.(*http2Server)}
   354  		s.h = h
   355  		s.mu.Unlock()
   356  		switch ht {
   357  		case notifyCall:
   358  			go transport.HandleStreams(context.Background(), h.handleStreamAndNotify)
   359  		case suspended:
   360  			go transport.HandleStreams(context.Background(), func(*Stream) {})
   361  		case misbehaved:
   362  			go transport.HandleStreams(context.Background(), func(s *Stream) {
   363  				go h.handleStreamMisbehave(t, s)
   364  			})
   365  		case encodingRequiredStatus:
   366  			go transport.HandleStreams(context.Background(), func(s *Stream) {
   367  				go h.handleStreamEncodingRequiredStatus(s)
   368  			})
   369  		case invalidHeaderField:
   370  			go transport.HandleStreams(context.Background(), func(s *Stream) {
   371  				go h.handleStreamInvalidHeaderField(s)
   372  			})
   373  		case delayRead:
   374  			h.notify = make(chan struct{})
   375  			h.getNotified = make(chan struct{})
   376  			s.mu.Lock()
   377  			close(s.ready)
   378  			s.mu.Unlock()
   379  			go transport.HandleStreams(context.Background(), func(s *Stream) {
   380  				go h.handleStreamDelayRead(t, s)
   381  			})
   382  		case pingpong:
   383  			go transport.HandleStreams(context.Background(), func(s *Stream) {
   384  				go h.handleStreamPingPong(t, s)
   385  			})
   386  		default:
   387  			go transport.HandleStreams(context.Background(), func(s *Stream) {
   388  				go h.handleStream(t, s)
   389  			})
   390  		}
   391  	}
   392  }
   393  
   394  func (s *server) wait(t *testing.T, timeout time.Duration) {
   395  	select {
   396  	case err := <-s.startedErr:
   397  		if err != nil {
   398  			t.Fatal(err)
   399  		}
   400  	case <-time.After(timeout):
   401  		t.Fatalf("Timed out after %v waiting for server to be ready", timeout)
   402  	}
   403  }
   404  
   405  func (s *server) stop() {
   406  	s.lis.Close()
   407  	s.mu.Lock()
   408  	for c := range s.conns {
   409  		c.Close(errors.New("server Stop called"))
   410  	}
   411  	s.conns = nil
   412  	s.mu.Unlock()
   413  }
   414  
   415  func (s *server) addr() string {
   416  	if s.lis == nil {
   417  		return ""
   418  	}
   419  	return s.lis.Addr().String()
   420  }
   421  
   422  func setUpServerOnly(t *testing.T, port int, sc *ServerConfig, ht hType) *server {
   423  	server := newTestServer()
   424  	sc.ChannelzParentID = server.channelzID
   425  	go server.start(t, port, sc, ht)
   426  	server.wait(t, 2*time.Second)
   427  	return server
   428  }
   429  
   430  func setUp(t *testing.T, port int, ht hType) (*server, *http2Client, func()) {
   431  	return setUpWithOptions(t, port, &ServerConfig{}, ht, ConnectOptions{})
   432  }
   433  
   434  func setUpWithOptions(t *testing.T, port int, sc *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) {
   435  	server := setUpServerOnly(t, port, sc, ht)
   436  	addr := resolver.Address{Addr: "localhost:" + server.port}
   437  	copts.ChannelzParentID = channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)
   438  
   439  	connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
   440  	ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {})
   441  	if connErr != nil {
   442  		cancel() // Do not cancel in success path.
   443  		t.Fatalf("failed to create transport: %v", connErr)
   444  	}
   445  	return server, ct.(*http2Client), cancel
   446  }
   447  
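        // setUpWithNoPingServer starts a server that completes the HTTP/2 settings
        // handshake but is otherwise unresponsive, and returns a client transport
        // connected to it. The accepted connection is delivered on connCh.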
   448  func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.Conn) (*http2Client, func()) {
   449  	lis, err := net.Listen("tcp", "localhost:0")
   450  	if err != nil {
   451  		t.Fatalf("Failed to listen: %v", err)
   452  	}
   453  	// Launch a non-responsive server.
   454  	go func() {
   455  		defer lis.Close()
   456  		conn, err := lis.Accept()
   457  		if err != nil {
   458  			t.Errorf("Error at server-side while accepting: %v", err)
   459  			close(connCh)
   460  			return
   461  		}
   462  		framer := http2.NewFramer(conn, conn)
   463  		if err := framer.WriteSettings(); err != nil {
   464  			t.Errorf("Error at server-side while writing settings: %v", err)
   465  			close(connCh)
   466  			return
   467  		}
   468  		connCh <- conn
   469  	}()
   470  	connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
   471  	tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {})
   472  	if err != nil {
   473  		cancel() // Do not cancel in success path.
   474  		// Server clean-up.
   475  		lis.Close()
   476  		if conn, ok := <-connCh; ok {
   477  			conn.Close()
   478  		}
   479  		t.Fatalf("Failed to dial: %v", err)
   480  	}
   481  	return tr.(*http2Client), cancel
   482  }
   483  
   484  // TestInflightStreamClosing ensures that closing an in-flight stream
   485  // sends a status error to a concurrent stream reader.
   486  func (s) TestInflightStreamClosing(t *testing.T) {
   487  	serverConfig := &ServerConfig{}
   488  	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
   489  	defer cancel()
   490  	defer server.stop()
   491  	defer client.Close(fmt.Errorf("closed manually by test"))
   492  
   493  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   494  	defer cancel()
   495  	stream, err := client.NewStream(ctx, &CallHdr{})
   496  	if err != nil {
   497  		t.Fatalf("Client failed to create RPC request: %v", err)
   498  	}
   499  
   500  	donec := make(chan struct{})
   501  	serr := status.Error(codes.Internal, "client connection is closing")
   502  	go func() {
   503  		defer close(donec)
   504  		if _, err := stream.Read(make([]byte, defaultWindowSize)); err != serr {
   505  			t.Errorf("unexpected Stream error %v, expected %v", err, serr)
   506  		}
   507  	}()
   508  
   509  	// should unblock concurrent stream.Read
   510  	client.CloseStream(stream, serr)
   511  
   512  	// wait for stream.Read error
   513  	timeout := time.NewTimer(5 * time.Second)
   514  	select {
   515  	case <-donec:
   516  		if !timeout.Stop() {
   517  			<-timeout.C
   518  		}
   519  	case <-timeout.C:
   520  		t.Fatalf("Test timed out, expected a status error.")
   521  	}
   522  }
   523  
   524  // Tests that when streamID > MaxStreamID, the current client transport drains.
   525  func (s) TestClientTransportDrainsAfterStreamIDExhausted(t *testing.T) {
   526  	server, ct, cancel := setUp(t, 0, normal)
   527  	defer cancel()
   528  	defer server.stop()
   529  	callHdr := &CallHdr{
   530  		Host:   "localhost",
   531  		Method: "foo.Small",
   532  	}
   533  
   534  	originalMaxStreamID := MaxStreamID
   535  	MaxStreamID = 3
   536  	defer func() {
   537  		MaxStreamID = originalMaxStreamID
   538  	}()
   539  
   540  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   541  	defer ctxCancel()
   542  
   543  	s, err := ct.NewStream(ctx, callHdr)
   544  	if err != nil {
   545  		t.Fatalf("ct.NewStream() = %v", err)
   546  	}
   547  	if s.id != 1 {
   548  		t.Fatalf("Stream id: %d, want: 1", s.id)
   549  	}
   550  
   551  	if got, want := ct.stateForTesting(), reachable; got != want {
   552  		t.Fatalf("Client transport state %v, want %v", got, want)
   553  	}
   554  
   555  	// The expected stream ID here is 3 since stream IDs are incremented by 2.
   556  	s, err = ct.NewStream(ctx, callHdr)
   557  	if err != nil {
   558  		t.Fatalf("ct.NewStream() = %v", err)
   559  	}
   560  	if s.id != 3 {
   561  		t.Fatalf("Stream id: %d, want: 3", s.id)
   562  	}
   563  
   564  	// Verify that ct.state is draining when the next stream ID > MaxStreamID.
   565  	if got, want := ct.stateForTesting(), draining; got != want {
   566  		t.Fatalf("Client transport state %v, want %v", got, want)
   567  	}
   568  }
   569  
   570  func (s) TestClientSendAndReceive(t *testing.T) {
   571  	server, ct, cancel := setUp(t, 0, normal)
   572  	defer cancel()
   573  	callHdr := &CallHdr{
   574  		Host:   "localhost",
   575  		Method: "foo.Small",
   576  	}
   577  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   578  	defer ctxCancel()
   579  	s1, err1 := ct.NewStream(ctx, callHdr)
   580  	if err1 != nil {
   581  		t.Fatalf("failed to open stream: %v", err1)
   582  	}
   583  	if s1.id != 1 {
   584  		t.Fatalf("wrong stream id: %d", s1.id)
   585  	}
   586  	s2, err2 := ct.NewStream(ctx, callHdr)
   587  	if err2 != nil {
   588  		t.Fatalf("failed to open stream: %v", err2)
   589  	}
   590  	if s2.id != 3 {
   591  		t.Fatalf("wrong stream id: %d", s2.id)
   592  	}
   593  	opts := Options{Last: true}
   594  	if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF {
   595  		t.Fatalf("failed to send data: %v", err)
   596  	}
   597  	p := make([]byte, len(expectedResponse))
   598  	_, recvErr := s1.Read(p)
   599  	if recvErr != nil || !bytes.Equal(p, expectedResponse) {
   600  		t.Fatalf("Error: %v, want <nil>; Result: %v, want %v", recvErr, p, expectedResponse)
   601  	}
   602  	_, recvErr = s1.Read(p)
   603  	if recvErr != io.EOF {
   604  		t.Fatalf("Error: %v; want <EOF>", recvErr)
   605  	}
   606  	ct.Close(fmt.Errorf("closed manually by test"))
   607  	server.stop()
   608  }
   609  
   610  func (s) TestClientErrorNotify(t *testing.T) {
   611  	server, ct, cancel := setUp(t, 0, normal)
   612  	defer cancel()
   613  	go server.stop()
   614  	// ct.reader should detect the error and activate ct.Error().
   615  	<-ct.Error()
   616  	ct.Close(fmt.Errorf("closed manually by test"))
   617  }
   618  
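        // performOneRPC opens a stream, sends the expected request, and tries to
        // read the response. Errors are ignored because the underlying transport
        // may be torn down at any point by the caller.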
   619  func performOneRPC(ct ClientTransport) {
   620  	callHdr := &CallHdr{
   621  		Host:   "localhost",
   622  		Method: "foo.Small",
   623  	}
   624  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   625  	defer cancel()
   626  	s, err := ct.NewStream(ctx, callHdr)
   627  	if err != nil {
   628  		return
   629  	}
   630  	opts := Options{Last: true}
   631  	if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF {
   632  		time.Sleep(5 * time.Millisecond)
   633  		// The following s.Read()s could error out because the
   634  		// underlying transport is gone.
   635  		//
   636  		// Read response
   637  		p := make([]byte, len(expectedResponse))
   638  		s.Read(p)
   639  		// Read io.EOF
   640  		s.Read(p)
   641  	}
   642  }
   643  
   644  func (s) TestClientMix(t *testing.T) {
   645  	s, ct, cancel := setUp(t, 0, normal)
   646  	defer cancel()
   647  	time.AfterFunc(time.Second, s.stop)
   648  	go func(ct ClientTransport) {
   649  		<-ct.Error()
   650  		ct.Close(fmt.Errorf("closed manually by test"))
   651  	}(ct)
   652  	for i := 0; i < 750; i++ {
   653  		time.Sleep(2 * time.Millisecond)
   654  		go performOneRPC(ct)
   655  	}
   656  }
   657  
   658  func (s) TestLargeMessage(t *testing.T) {
   659  	server, ct, cancel := setUp(t, 0, normal)
   660  	defer cancel()
   661  	callHdr := &CallHdr{
   662  		Host:   "localhost",
   663  		Method: "foo.Large",
   664  	}
   665  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   666  	defer ctxCancel()
   667  	var wg sync.WaitGroup
   668  	for i := 0; i < 2; i++ {
   669  		wg.Add(1)
   670  		go func() {
   671  			defer wg.Done()
   672  			s, err := ct.NewStream(ctx, callHdr)
   673  			if err != nil {
   674  				t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
   675  			}
   676  			if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil && err != io.EOF {
   677  				t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
   678  			}
   679  			p := make([]byte, len(expectedResponseLarge))
   680  			if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
   681  				t.Errorf("s.Read(_) = _, %v, want expectedResponseLarge, <nil>", err)
   682  			}
   683  			if _, err = s.Read(p); err != io.EOF {
   684  				t.Errorf("Failed to complete the stream %v; want <EOF>", err)
   685  			}
   686  		}()
   687  	}
   688  	wg.Wait()
   689  	ct.Close(fmt.Errorf("closed manually by test"))
   690  	server.stop()
   691  }
   692  
   693  func (s) TestLargeMessageWithDelayRead(t *testing.T) {
   694  	// Disable dynamic flow control.
   695  	sc := &ServerConfig{
   696  		InitialWindowSize:     defaultWindowSize,
   697  		InitialConnWindowSize: defaultWindowSize,
   698  	}
   699  	co := ConnectOptions{
   700  		InitialWindowSize:     defaultWindowSize,
   701  		InitialConnWindowSize: defaultWindowSize,
   702  	}
   703  	server, ct, cancel := setUpWithOptions(t, 0, sc, delayRead, co)
   704  	defer cancel()
   705  	defer server.stop()
   706  	defer ct.Close(fmt.Errorf("closed manually by test"))
   707  	server.mu.Lock()
   708  	ready := server.ready
   709  	server.mu.Unlock()
   710  	callHdr := &CallHdr{
   711  		Host:   "localhost",
   712  		Method: "foo.Large",
   713  	}
   714  	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
   715  	defer cancel()
   716  	s, err := ct.NewStream(ctx, callHdr)
   717  	if err != nil {
   718  		t.Fatalf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
   719  		return
   720  	}
   721  	// Wait for the server's handler to be initialized.
   722  	select {
   723  	case <-ready:
   724  	case <-ctx.Done():
   725  		t.Fatalf("Client timed out waiting for server handler to be initialized.")
   726  	}
   727  	server.mu.Lock()
   728  	serviceHandler := server.h
   729  	server.mu.Unlock()
   730  	var (
   731  		mu    sync.Mutex
   732  		total int
   733  	)
   734  	s.wq.replenish = func(n int) {
   735  		mu.Lock()
   736  		total += n
   737  		mu.Unlock()
   738  		s.wq.realReplenish(n)
   739  	}
   740  	getTotal := func() int {
   741  		mu.Lock()
   742  		defer mu.Unlock()
   743  		return total
   744  	}
   745  	done := make(chan struct{})
   746  	defer close(done)
   747  	go func() {
   748  		for {
   749  			select {
   750  			// Prevent goroutine from leaking in case of error.
   751  			case <-done:
   752  				return
   753  			default:
   754  			}
   755  			if getTotal() == defaultWindowSize {
   756  				// Unblock the server so it can read and
   757  				// thereby send a stream-level window update.
   758  				close(serviceHandler.getNotified)
   759  				return
   760  			}
   761  			runtime.Gosched()
   762  		}
   763  	}()
   764  	// This write will cause the client to run out of stream-level
   765  	// flow control; the other side won't send a window update
   766  	// until that happens.
   767  	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{}); err != nil {
   768  		t.Fatalf("Write(_, _, _) = %v, want <nil>", err)
   769  	}
   770  	p := make([]byte, len(expectedResponseLarge))
   771  
   772  	// Wait for the other side to run out of stream level flow control before
   773  	// reading and thereby sending a window update.
   774  	select {
   775  	case <-serviceHandler.notify:
   776  	case <-ctx.Done():
   777  		t.Fatalf("Client timed out")
   778  	}
   779  	if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
   780  		t.Fatalf("s.Read(_) = _, %v, want _, <nil>", err)
   781  	}
   782  	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil {
   783  		t.Fatalf("Write(_, _, _) = %v, want <nil>", err)
   784  	}
   785  	if _, err = s.Read(p); err != io.EOF {
   786  		t.Fatalf("Failed to complete the stream %v; want <EOF>", err)
   787  	}
   788  }
   789  
   790  // TestGracefulClose ensures that GracefulClose allows in-flight streams to
   791  // proceed until they complete naturally, while not allowing creation of new
   792  // streams during this window.
   793  func (s) TestGracefulClose(t *testing.T) {
   794  	server, ct, cancel := setUp(t, 0, pingpong)
   795  	defer cancel()
   796  	defer func() {
   797  		// Stop the server's listener to make the server's goroutines terminate
   798  		// (after the last active stream is done).
   799  		server.lis.Close()
   800  		// Check for goroutine leaks (i.e. GracefulClose with an active stream
   801  		// doesn't eventually close the connection when that stream completes).
   802  		leakcheck.Check(t)
   803  		// Correctly clean up the server
   804  		server.stop()
   805  	}()
   806  	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
   807  	defer cancel()
   808  
   809  	// Create a stream that will exist for this whole test and confirm basic
   810  	// functionality.
   811  	s, err := ct.NewStream(ctx, &CallHdr{})
   812  	if err != nil {
   813  		t.Fatalf("NewStream(_, _) = _, %v, want _, <nil>", err)
   814  	}
   815  	msg := make([]byte, 1024)
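        	// Frame the message with the 5-byte gRPC prefix: a zero compression
        	// flag followed by the big-endian message length.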
   816  	outgoingHeader := make([]byte, 5)
   817  	outgoingHeader[0] = byte(0)
   818  	binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(len(msg)))
   819  	incomingHeader := make([]byte, 5)
   820  	if err := ct.Write(s, outgoingHeader, msg, &Options{}); err != nil {
   821  		t.Fatalf("Error while writing: %v", err)
   822  	}
   823  	if _, err := s.Read(incomingHeader); err != nil {
   824  		t.Fatalf("Error while reading: %v", err)
   825  	}
   826  	sz := binary.BigEndian.Uint32(incomingHeader[1:])
   827  	recvMsg := make([]byte, int(sz))
   828  	if _, err := s.Read(recvMsg); err != nil {
   829  		t.Fatalf("Error while reading: %v", err)
   830  	}
   831  
   832  	// Gracefully close the transport, which should not affect the existing
   833  	// stream.
   834  	ct.GracefulClose()
   835  
   836  	var wg sync.WaitGroup
   837  	// Expect errors creating new streams because the client transport has been
   838  	// gracefully closed.
   839  	for i := 0; i < 200; i++ {
   840  		wg.Add(1)
   841  		go func() {
   842  			defer wg.Done()
   843  			_, err := ct.NewStream(ctx, &CallHdr{})
   844  			if err != nil && err.(*NewStreamError).Err == ErrConnClosing && err.(*NewStreamError).AllowTransparentRetry {
   845  				return
   846  			}
   847  			t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing)
   848  		}()
   849  	}
   850  
   851  	// Confirm the existing stream still functions as expected.
   852  	ct.Write(s, nil, nil, &Options{Last: true})
   853  	if _, err := s.Read(incomingHeader); err != io.EOF {
   854  		t.Fatalf("Client expected EOF from the server. Got: %v", err)
   855  	}
   856  	wg.Wait()
   857  }
   858  
   859  func (s) TestLargeMessageSuspension(t *testing.T) {
   860  	server, ct, cancel := setUp(t, 0, suspended)
   861  	defer cancel()
   862  	callHdr := &CallHdr{
   863  		Host:   "localhost",
   864  		Method: "foo.Large",
   865  	}
   866  	// Set a long enough timeout for writing a large message out.
   867  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   868  	defer cancel()
   869  	s, err := ct.NewStream(ctx, callHdr)
   870  	if err != nil {
   871  		t.Fatalf("failed to open stream: %v", err)
   872  	}
   873  	// Launch a goroutine similar to the stream monitoring goroutine in
   874  	// stream.go to keep track of context timeout and call CloseStream.
   875  	go func() {
   876  		<-ctx.Done()
   877  		ct.CloseStream(s, ContextErr(ctx.Err()))
   878  	}()
   879  	// The write should not complete successfully due to flow control.
   880  	msg := make([]byte, initialWindowSize*8)
   881  	ct.Write(s, nil, msg, &Options{})
   882  	err = ct.Write(s, nil, msg, &Options{Last: true})
   883  	if err != errStreamDone {
   884  		t.Fatalf("Write got %v, want %v", err, errStreamDone)
   885  	}
   886  	expectedErr := status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
   887  	if _, err := s.Read(make([]byte, 8)); err.Error() != expectedErr.Error() {
   888  		t.Fatalf("Read got %v of type %T, want %v", err, err, expectedErr)
   889  	}
   890  	ct.Close(fmt.Errorf("closed manually by test"))
   891  	server.stop()
   892  }
   893  
   894  func (s) TestMaxStreams(t *testing.T) {
   895  	serverConfig := &ServerConfig{
   896  		MaxStreams: 1,
   897  	}
   898  	server, ct, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
   899  	defer cancel()
   900  	defer ct.Close(fmt.Errorf("closed manually by test"))
   901  	defer server.stop()
   902  	callHdr := &CallHdr{
   903  		Host:   "localhost",
   904  		Method: "foo.Large",
   905  	}
   906  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   907  	defer cancel()
   908  	s, err := ct.NewStream(ctx, callHdr)
   909  	if err != nil {
   910  		t.Fatalf("Failed to open stream: %v", err)
   911  	}
   912  	// Keep creating streams until one fails with deadline exceeded, which
   913  	// indicates that the server's settings have been applied on the client.
   914  	slist := []*Stream{}
   915  	pctx, cancel := context.WithCancel(context.Background())
   916  	defer cancel()
   917  	timer := time.NewTimer(time.Second * 10)
   918  	expectedErr := status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
   919  	for {
   920  		select {
   921  		case <-timer.C:
   922  			t.Fatalf("Test timeout: client didn't receive server settings.")
   923  		default:
   924  		}
   925  		ctx, cancel := context.WithDeadline(pctx, time.Now().Add(time.Second))
   926  		// This is only to satisfy govet. All these contexts are derived from a
   927  		// base context which is canceled at the end of the test.
   928  		defer cancel()
   929  		if str, err := ct.NewStream(ctx, callHdr); err == nil {
   930  			slist = append(slist, str)
   931  			continue
   932  		} else if err.Error() != expectedErr.Error() {
   933  			t.Fatalf("ct.NewStream(_,_) = _, %v, want _, %v", err, expectedErr)
   934  		}
   935  		timer.Stop()
   936  		break
   937  	}
   938  	done := make(chan struct{})
   939  	// Try to create a new stream.
   940  	go func() {
   941  		defer close(done)
   942  		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
   943  		defer cancel()
   944  		if _, err := ct.NewStream(ctx, callHdr); err != nil {
   945  			t.Errorf("Failed to open stream: %v", err)
   946  		}
   947  	}()
   948  	// Close all the extra streams created and make sure the new stream is not created.
   949  	for _, str := range slist {
   950  		ct.CloseStream(str, nil)
   951  	}
   952  	select {
   953  	case <-done:
   954  		t.Fatalf("Test failed: didn't expect new stream to be created just yet.")
   955  	default:
   956  	}
   957  	// Close the first stream created so that the new stream can finally be created.
   958  	ct.CloseStream(s, nil)
   959  	<-done
   960  	ct.Close(fmt.Errorf("closed manually by test"))
   961  	<-ct.writerDone
   962  	if ct.maxConcurrentStreams != 1 {
   963  		t.Fatalf("ct.maxConcurrentStreams: %d, want 1", ct.maxConcurrentStreams)
   964  	}
   965  }
   966  
   967  func (s) TestServerContextCanceledOnClosedConnection(t *testing.T) {
   968  	server, ct, cancel := setUp(t, 0, suspended)
   969  	defer cancel()
   970  	callHdr := &CallHdr{
   971  		Host:   "localhost",
   972  		Method: "foo",
   973  	}
   974  	var sc *http2Server
   975  	// Wait until the server transport is setup.
   976  	for {
   977  		server.mu.Lock()
   978  		if len(server.conns) == 0 {
   979  			server.mu.Unlock()
   980  			time.Sleep(time.Millisecond)
   981  			continue
   982  		}
   983  		for k := range server.conns {
   984  			var ok bool
   985  			sc, ok = k.(*http2Server)
   986  			if !ok {
   987  				t.Fatalf("Failed to convert %v to *http2Server", k)
   988  			}
   989  		}
   990  		server.mu.Unlock()
   991  		break
   992  	}
   993  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   994  	defer cancel()
   995  	s, err := ct.NewStream(ctx, callHdr)
   996  	if err != nil {
   997  		t.Fatalf("Failed to open stream: %v", err)
   998  	}
   999  	ct.controlBuf.put(&dataFrame{
  1000  		streamID:    s.id,
  1001  		endStream:   false,
  1002  		h:           nil,
  1003  		d:           make([]byte, http2MaxFrameLen),
  1004  		onEachWrite: func() {},
  1005  	})
  1006  	// Loop until the server side stream is created.
  1007  	var ss *Stream
  1008  	for {
  1009  		time.Sleep(time.Second)
  1010  		sc.mu.Lock()
  1011  		if len(sc.activeStreams) == 0 {
  1012  			sc.mu.Unlock()
  1013  			continue
  1014  		}
  1015  		ss = sc.activeStreams[s.id]
  1016  		sc.mu.Unlock()
  1017  		break
  1018  	}
  1019  	ct.Close(fmt.Errorf("closed manually by test"))
  1020  	select {
  1021  	case <-ss.Context().Done():
  1022  		if ss.Context().Err() != context.Canceled {
  1023  			t.Fatalf("ss.Context().Err() got %v, want %v", ss.Context().Err(), context.Canceled)
  1024  		}
  1025  	case <-time.After(5 * time.Second):
  1026  		t.Fatalf("Failed to cancel the context of the server-side stream.")
  1027  	}
  1028  	server.stop()
  1029  }
  1030  
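        // TestClientConnDecoupledFromApplicationRead verifies that the client's
        // connection-level flow control is replenished independently of application
        // reads, so the server can send on a second stream even while data on the
        // first stream remains unread by the client.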
  1031  func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) {
  1032  	connectOptions := ConnectOptions{
  1033  		InitialWindowSize:     defaultWindowSize,
  1034  		InitialConnWindowSize: defaultWindowSize,
  1035  	}
  1036  	server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions)
  1037  	defer cancel()
  1038  	defer server.stop()
  1039  	defer client.Close(fmt.Errorf("closed manually by test"))
  1040  
  1041  	waitWhileTrue(t, func() (bool, error) {
  1042  		server.mu.Lock()
  1043  		defer server.mu.Unlock()
  1044  
  1045  		if len(server.conns) == 0 {
  1046  			return true, fmt.Errorf("timed out while waiting for connection to be created on the server")
  1047  		}
  1048  		return false, nil
  1049  	})
  1050  
  1051  	var st *http2Server
  1052  	server.mu.Lock()
  1053  	for k := range server.conns {
  1054  		st = k.(*http2Server)
  1055  	}
  1056  	notifyChan := make(chan struct{})
  1057  	server.h.notify = notifyChan
  1058  	server.mu.Unlock()
  1059  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1060  	defer cancel()
  1061  	cstream1, err := client.NewStream(ctx, &CallHdr{})
  1062  	if err != nil {
  1063  		t.Fatalf("Client failed to create first stream. Err: %v", err)
  1064  	}
  1065  
  1066  	<-notifyChan
  1067  	var sstream1 *Stream
  1068  	// Access stream on the server.
  1069  	st.mu.Lock()
  1070  	for _, v := range st.activeStreams {
  1071  		if v.id == cstream1.id {
  1072  			sstream1 = v
  1073  		}
  1074  	}
  1075  	st.mu.Unlock()
  1076  	if sstream1 == nil {
  1077  		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id)
  1078  	}
  1079  	// Exhaust client's connection window.
  1080  	if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
  1081  		t.Fatalf("Server failed to write data. Err: %v", err)
  1082  	}
  1083  	notifyChan = make(chan struct{})
  1084  	server.mu.Lock()
  1085  	server.h.notify = notifyChan
  1086  	server.mu.Unlock()
  1087  	// Create another stream on client.
  1088  	cstream2, err := client.NewStream(ctx, &CallHdr{})
  1089  	if err != nil {
  1090  		t.Fatalf("Client failed to create second stream. Err: %v", err)
  1091  	}
  1092  	<-notifyChan
  1093  	var sstream2 *Stream
  1094  	st.mu.Lock()
  1095  	for _, v := range st.activeStreams {
  1096  		if v.id == cstream2.id {
  1097  			sstream2 = v
  1098  		}
  1099  	}
  1100  	st.mu.Unlock()
  1101  	if sstream2 == nil {
  1102  		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id)
  1103  	}
  1104  	// Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream.
  1105  	if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
  1106  		t.Fatalf("Server failed to write data. Err: %v", err)
  1107  	}
  1108  
  1109  	// Client should be able to read data on second stream.
  1110  	if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil {
  1111  		t.Fatalf("_.Read(_) = _, %v, want _, <nil>", err)
  1112  	}
  1113  
  1114  	// Client should be able to read data on first stream.
  1115  	if _, err := cstream1.Read(make([]byte, defaultWindowSize)); err != nil {
  1116  		t.Fatalf("_.Read(_) = _, %v, want _, <nil>", err)
  1117  	}
  1118  }
  1119  
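        // TestServerConnDecoupledFromApplicationRead verifies the server-side
        // counterpart: the client can send on a second stream even while data on
        // the first stream remains unread by the server application.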
  1120  func (s) TestServerConnDecoupledFromApplicationRead(t *testing.T) {
  1121  	serverConfig := &ServerConfig{
  1122  		InitialWindowSize:     defaultWindowSize,
  1123  		InitialConnWindowSize: defaultWindowSize,
  1124  	}
  1125  	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
  1126  	defer cancel()
  1127  	defer server.stop()
  1128  	defer client.Close(fmt.Errorf("closed manually by test"))
  1129  	waitWhileTrue(t, func() (bool, error) {
  1130  		server.mu.Lock()
  1131  		defer server.mu.Unlock()
  1132  
  1133  		if len(server.conns) == 0 {
  1134  			return true, fmt.Errorf("timed out while waiting for connection to be created on the server")
  1135  		}
  1136  		return false, nil
  1137  	})
  1138  	var st *http2Server
  1139  	server.mu.Lock()
  1140  	for k := range server.conns {
  1141  		st = k.(*http2Server)
  1142  	}
  1143  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1144  	defer cancel()
  1145  	server.mu.Unlock()
  1146  	cstream1, err := client.NewStream(ctx, &CallHdr{})
  1147  	if err != nil {
  1148  		t.Fatalf("Failed to create 1st stream. Err: %v", err)
  1149  	}
  1150  	// Exhaust server's connection window.
  1151  	if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil {
  1152  		t.Fatalf("Client failed to write data. Err: %v", err)
  1153  	}
  1154  	// Client should be able to create another stream and send data on it.
  1155  	cstream2, err := client.NewStream(ctx, &CallHdr{})
  1156  	if err != nil {
  1157  		t.Fatalf("Failed to create 2nd stream. Err: %v", err)
  1158  	}
  1159  	if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil {
  1160  		t.Fatalf("Client failed to write data. Err: %v", err)
  1161  	}
  1162  	// Get the streams on server.
  1163  	waitWhileTrue(t, func() (bool, error) {
  1164  		st.mu.Lock()
  1165  		defer st.mu.Unlock()
  1166  
  1167  		if len(st.activeStreams) != 2 {
  1168  			return true, fmt.Errorf("timed out while waiting for the server to have created the streams")
  1169  		}
  1170  		return false, nil
  1171  	})
  1172  	var sstream1 *Stream
  1173  	st.mu.Lock()
  1174  	for _, v := range st.activeStreams {
  1175  		if v.id == 1 {
  1176  			sstream1 = v
  1177  		}
  1178  	}
  1179  	st.mu.Unlock()
  1180  	// Reading from the stream on server should succeed.
  1181  	if _, err := sstream1.Read(make([]byte, defaultWindowSize)); err != nil {
  1182  		t.Fatalf("_.Read(_) = %v, want <nil>", err)
  1183  	}
  1184  
  1185  	if _, err := sstream1.Read(make([]byte, 1)); err != io.EOF {
  1186  		t.Fatalf("_.Read(_) = %v, want io.EOF", err)
  1187  	}
  1188  
  1189  }
  1190  
  1191  func (s) TestServerWithMisbehavedClient(t *testing.T) {
  1192  	server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
  1193  	defer server.stop()
  1194  	// Create a client that can override server stream quota.
  1195  	mconn, err := net.Dial("tcp", server.lis.Addr().String())
  1196  	if err != nil {
  1197  		t.Fatalf("Client failed to dial: %v", err)
  1198  	}
  1199  	defer mconn.Close()
  1200  	if err := mconn.SetWriteDeadline(time.Now().Add(time.Second * 10)); err != nil {
  1201  		t.Fatalf("Failed to set write deadline: %v", err)
  1202  	}
  1203  	if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) {
  1204  		t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, <nil>", n, err, len(clientPreface))
  1205  	}
  1206  	// success chan indicates that the reader received an RSTStream from the server.
  1207  	success := make(chan struct{})
  1208  	var mu sync.Mutex
  1209  	framer := http2.NewFramer(mconn, mconn)
  1210  	if err := framer.WriteSettings(); err != nil {
  1211  		t.Fatalf("Error while writing settings: %v", err)
  1212  	}
  1213  	go func() { // Launch a reader for this misbehaving client.
  1214  		for {
  1215  			frame, err := framer.ReadFrame()
  1216  			if err != nil {
  1217  				return
  1218  			}
  1219  			switch frame := frame.(type) {
  1220  			case *http2.PingFrame:
  1221  				// Write ping ack back so that server's BDP estimation works right.
  1222  				mu.Lock()
  1223  				framer.WritePing(true, frame.Data)
  1224  				mu.Unlock()
  1225  			case *http2.RSTStreamFrame:
  1226  				if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1227  					t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1228  				}
  1229  				close(success)
  1230  				return
  1231  			default:
  1232  				// Do nothing.
  1233  			}
  1234  
  1235  		}
  1236  	}()
  1237  	// Create a stream.
  1238  	var buf bytes.Buffer
  1239  	henc := hpack.NewEncoder(&buf)
  1240  	// TODO(mmukhi): Remove unnecessary fields.
  1241  	if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}); err != nil {
  1242  		t.Fatalf("Error while encoding header: %v", err)
  1243  	}
  1244  	if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil {
  1245  		t.Fatalf("Error while encoding header: %v", err)
  1246  	}
  1247  	if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil {
  1248  		t.Fatalf("Error while encoding header: %v", err)
  1249  	}
  1250  	if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil {
  1251  		t.Fatalf("Error while encoding header: %v", err)
  1252  	}
  1253  	mu.Lock()
  1254  	if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil {
  1255  		mu.Unlock()
  1256  		t.Fatalf("Error while writing headers: %v", err)
  1257  	}
  1258  	mu.Unlock()
  1259  
  1260  	// Test server behavior for violation of stream flow control window size restriction.
  1261  	timer := time.NewTimer(time.Second * 5)
  1262  	dbuf := make([]byte, http2MaxFrameLen)
  1263  	for {
  1264  		select {
  1265  		case <-timer.C:
  1266  			t.Fatalf("Test timed out.")
  1267  		case <-success:
  1268  			return
  1269  		default:
  1270  		}
  1271  		mu.Lock()
  1272  		if err := framer.WriteData(1, false, dbuf); err != nil {
  1273  			mu.Unlock()
  1274  			// Error here means the server could have closed the connection due to flow control
  1275  			// violation. Make sure that is the case by waiting for success chan to be closed.
  1276  			select {
  1277  			case <-timer.C:
  1278  				t.Fatalf("Error while writing data: %v", err)
  1279  			case <-success:
  1280  				return
  1281  			}
  1282  		}
  1283  		mu.Unlock()
  1284  		// This for loop is capable of hogging the CPU and causing starvation
  1285  		// in Go versions prior to 1.9,
  1286  		// in a single-CPU environment. Explicitly relinquish the processor.
  1287  		runtime.Gosched()
  1288  	}
  1289  }
  1290  
  1291  func (s) TestClientHonorsConnectContext(t *testing.T) {
  1292  	// Create a server that will not send a preface.
  1293  	lis, err := net.Listen("tcp", "localhost:0")
  1294  	if err != nil {
  1295  		t.Fatalf("Error while listening: %v", err)
  1296  	}
  1297  	defer lis.Close()
  1298  	go func() { // Launch the misbehaving server.
  1299  		sconn, err := lis.Accept()
  1300  		if err != nil {
  1301  			t.Errorf("Error while accepting: %v", err)
  1302  			return
  1303  		}
  1304  		defer sconn.Close()
  1305  		if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil {
  1306  			t.Errorf("Error while reading client preface: %v", err)
  1307  			return
  1308  		}
  1309  		sfr := http2.NewFramer(sconn, sconn)
  1310  		// Do not write a settings frame, but read from the conn forever.
  1311  		for {
  1312  			if _, err := sfr.ReadFrame(); err != nil {
  1313  				return
  1314  			}
  1315  		}
  1316  	}()
  1317  
  1318  	// Test context cancelation.
  1319  	timeBefore := time.Now()
  1320  	connectCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1321  	time.AfterFunc(100*time.Millisecond, cancel)
  1322  
  1323  	copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)}
  1324  	_, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {})
  1325  	if err == nil {
  1326  		t.Fatalf("NewClientTransport() returned successfully; wanted error")
  1327  	}
  1328  	t.Logf("NewClientTransport() = _, %v", err)
  1329  	if time.Since(timeBefore) > 3*time.Second {
  1330  		t.Fatalf("NewClientTransport returned > 3s after context cancelation")
  1331  	}
  1332  
  1333  	// Test context deadline.
  1334  	connectCtx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
  1335  	defer cancel()
  1336  	_, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {})
  1337  	if err == nil {
  1338  		t.Fatalf("NewClientTransport() returned successfully; wanted error")
  1339  	}
  1340  	t.Logf("NewClientTransport() = _, %v", err)
  1341  }
  1342  
  1343  func (s) TestClientWithMisbehavedServer(t *testing.T) {
  1344  	// Create a misbehaving server.
  1345  	lis, err := net.Listen("tcp", "localhost:0")
  1346  	if err != nil {
  1347  		t.Fatalf("Error while listening: %v", err)
  1348  	}
  1349  	defer lis.Close()
  1350  	// success chan indicates that the server received
  1351  	// an RSTStream from the client.
  1352  	success := make(chan struct{})
  1353  	go func() { // Launch the misbehaving server.
  1354  		sconn, err := lis.Accept()
  1355  		if err != nil {
  1356  			t.Errorf("Error while accepting: %v", err)
  1357  			return
  1358  		}
  1359  		defer sconn.Close()
  1360  		if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil {
  1361  			t.Errorf("Error while reading client preface: %v", err)
  1362  			return
  1363  		}
  1364  		sfr := http2.NewFramer(sconn, sconn)
  1365  		if err := sfr.WriteSettings(); err != nil {
  1366  			t.Errorf("Error while writing settings: %v", err)
  1367  			return
  1368  		}
  1369  		if err := sfr.WriteSettingsAck(); err != nil {
  1370  			t.Errorf("Error while writing settings: %v", err)
  1371  			return
  1372  		}
  1373  		var mu sync.Mutex
  1374  		for {
  1375  			frame, err := sfr.ReadFrame()
  1376  			if err != nil {
  1377  				return
  1378  			}
  1379  			switch frame := frame.(type) {
  1380  			case *http2.HeadersFrame:
  1381  				// When the client creates a stream, violate the stream flow control.
  1382  				go func() {
  1383  					buf := make([]byte, http2MaxFrameLen)
  1384  					for {
  1385  						mu.Lock()
  1386  						if err := sfr.WriteData(1, false, buf); err != nil {
  1387  							mu.Unlock()
  1388  							return
  1389  						}
  1390  						mu.Unlock()
  1391  						// This for loop is capable of hogging the CPU and causing starvation
  1392  						// in Go versions prior to 1.9,
  1393  						// in a single-CPU environment. Explicitly relinquish the processor.
  1394  						runtime.Gosched()
  1395  					}
  1396  				}()
  1397  			case *http2.RSTStreamFrame:
  1398  				if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1399  					t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1400  				}
  1401  				close(success)
  1402  				return
  1403  			case *http2.PingFrame:
  1404  				mu.Lock()
  1405  				sfr.WritePing(true, frame.Data)
  1406  				mu.Unlock()
  1407  			default:
  1408  			}
  1409  		}
  1410  	}()
  1411  	connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
  1412  	defer cancel()
  1413  
  1414  	copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)}
  1415  	ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {})
  1416  	if err != nil {
  1417  		t.Fatalf("Error while creating client transport: %v", err)
  1418  	}
  1419  	defer ct.Close(fmt.Errorf("closed manually by test"))
  1420  
  1421  	str, err := ct.NewStream(connectCtx, &CallHdr{})
  1422  	if err != nil {
  1423  		t.Fatalf("Error while creating stream: %v", err)
  1424  	}
  1425  	timer := time.NewTimer(time.Second * 5)
  1426  	go func() { // This goroutine mimics the one in stream.go to call CloseStream.
  1427  		<-str.Done()
  1428  		ct.CloseStream(str, nil)
  1429  	}()
  1430  	select {
  1431  	case <-timer.C:
  1432  		t.Fatalf("Test timed out.")
  1433  	case <-success:
  1434  	}
  1435  }
  1436  
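        // encodingTestStatus carries a raw newline in its message, which must be
        // encoded before it can be written in the grpc-message trailer.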
  1437  var encodingTestStatus = status.New(codes.Internal, "\n")
  1438  
  1439  func (s) TestEncodingRequiredStatus(t *testing.T) {
  1440  	server, ct, cancel := setUp(t, 0, encodingRequiredStatus)
  1441  	defer cancel()
  1442  	callHdr := &CallHdr{
  1443  		Host:   "localhost",
  1444  		Method: "foo",
  1445  	}
  1446  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1447  	defer cancel()
  1448  	s, err := ct.NewStream(ctx, callHdr)
  1449  	if err != nil {
  1450  		return
  1451  	}
  1452  	opts := Options{Last: true}
  1453  	if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != errStreamDone {
  1454  		t.Fatalf("Failed to write the request: %v", err)
  1455  	}
  1456  	p := make([]byte, http2MaxFrameLen)
  1457  	if _, err := s.trReader.(*transportReader).Read(p); err != io.EOF {
  1458  		t.Fatalf("Read got error %v, want %v", err, io.EOF)
  1459  	}
  1460  	if !testutils.StatusErrEqual(s.Status().Err(), encodingTestStatus.Err()) {
  1461  		t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus)
  1462  	}
  1463  	ct.Close(fmt.Errorf("closed manually by test"))
  1464  	server.stop()
  1465  }
  1466  
  1467  func (s) TestInvalidHeaderField(t *testing.T) {
  1468  	server, ct, cancel := setUp(t, 0, invalidHeaderField)
  1469  	defer cancel()
  1470  	callHdr := &CallHdr{
  1471  		Host:   "localhost",
  1472  		Method: "foo",
  1473  	}
  1474  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1475  	defer cancel()
  1476  	s, err := ct.NewStream(ctx, callHdr)
  1477  	if err != nil {
  1478  		return
  1479  	}
  1480  	p := make([]byte, http2MaxFrameLen)
  1481  	_, err = s.trReader.(*transportReader).Read(p)
  1482  	if se, ok := status.FromError(err); !ok || se.Code() != codes.Internal || !strings.Contains(err.Error(), expectedInvalidHeaderField) {
  1483  		t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.Internal, expectedInvalidHeaderField)
  1484  	}
  1485  	ct.Close(fmt.Errorf("closed manually by test"))
  1486  	server.stop()
  1487  }
  1488  
  1489  func (s) TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) {
  1490  	server, ct, cancel := setUp(t, 0, invalidHeaderField)
  1491  	defer cancel()
  1492  	defer server.stop()
  1493  	defer ct.Close(fmt.Errorf("closed manually by test"))
  1494  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1495  	defer cancel()
  1496  	s, err := ct.NewStream(ctx, &CallHdr{Host: "localhost", Method: "foo"})
  1497  	if err != nil {
  1498  		t.Fatalf("failed to create the stream: %v", err)
  1499  	}
  1500  	timer := time.NewTimer(time.Second)
  1501  	defer timer.Stop()
  1502  	select {
  1503  	case <-s.headerChan:
  1504  	case <-timer.C:
  1505  		t.Errorf("s.headerChan: got open, want closed")
  1506  	}
  1507  }
  1508  
  1509  func (s) TestIsReservedHeader(t *testing.T) {
  1510  	tests := []struct {
  1511  		h    string
  1512  		want bool
  1513  	}{
  1514  		{"", false}, // but should be rejected earlier
  1515  		{"foo", false},
  1516  		{"content-type", true},
  1517  		{"user-agent", true},
  1518  		{":anything", true},
  1519  		{"grpc-message-type", true},
  1520  		{"grpc-encoding", true},
  1521  		{"grpc-message", true},
  1522  		{"grpc-status", true},
  1523  		{"grpc-timeout", true},
  1524  		{"te", true},
  1525  	}
  1526  	for _, tt := range tests {
  1527  		got := isReservedHeader(tt.h)
  1528  		if got != tt.want {
  1529  			t.Errorf("isReservedHeader(%q) = %v; want %v", tt.h, got, tt.want)
  1530  		}
  1531  	}
  1532  }
  1533  
  1534  func (s) TestContextErr(t *testing.T) {
  1535  	for _, test := range []struct {
  1536  		// input
  1537  		errIn error
  1538  		// outputs
  1539  		errOut error
  1540  	}{
  1541  		{context.DeadlineExceeded, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())},
  1542  		{context.Canceled, status.Error(codes.Canceled, context.Canceled.Error())},
  1543  	} {
  1544  		err := ContextErr(test.errIn)
  1545  		if err.Error() != test.errOut.Error() {
  1546  			t.Fatalf("ContextErr(%v) = %v\nwant %v", test.errIn, err, test.errOut)
  1547  		}
  1548  	}
  1549  }
  1550  
  1551  type windowSizeConfig struct {
  1552  	serverStream int32
  1553  	serverConn   int32
  1554  	clientStream int32
  1555  	clientConn   int32
  1556  }
  1557  
  1558  func (s) TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) {
  1559  	wc := windowSizeConfig{
  1560  		serverStream: 10 * 1024 * 1024,
  1561  		serverConn:   12 * 1024 * 1024,
  1562  		clientStream: 6 * 1024 * 1024,
  1563  		clientConn:   8 * 1024 * 1024,
  1564  	}
  1565  	testFlowControlAccountCheck(t, 1024*1024, wc)
  1566  }
  1567  
  1568  func (s) TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) {
  1569  	// Setting the initial window sizes explicitly disables BDP-based dynamic
  1570  	// window estimation; values below defaultWindowSize are ignored.
  1571  	wc := windowSizeConfig{
  1572  		serverStream: defaultWindowSize,
  1573  		serverConn:   defaultWindowSize,
  1574  		clientStream: defaultWindowSize,
  1575  		clientConn:   defaultWindowSize,
  1576  	}
  1577  	testFlowControlAccountCheck(t, 1024*1024, wc)
  1578  }
  1579  
  1580  func (s) TestAccountCheckDynamicWindowSmallMessage(t *testing.T) {
  1581  	testFlowControlAccountCheck(t, 1024, windowSizeConfig{})
  1582  }
  1583  
  1584  func (s) TestAccountCheckDynamicWindowLargeMessage(t *testing.T) {
  1585  	testFlowControlAccountCheck(t, 1024*1024, windowSizeConfig{})
  1586  }
  1587  
  1588  func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) {
  1589  	sc := &ServerConfig{
  1590  		InitialWindowSize:     wc.serverStream,
  1591  		InitialConnWindowSize: wc.serverConn,
  1592  	}
  1593  	co := ConnectOptions{
  1594  		InitialWindowSize:     wc.clientStream,
  1595  		InitialConnWindowSize: wc.clientConn,
  1596  	}
  1597  	server, client, cancel := setUpWithOptions(t, 0, sc, pingpong, co)
  1598  	defer cancel()
  1599  	defer server.stop()
  1600  	defer client.Close(fmt.Errorf("closed manually by test"))
  1601  	waitWhileTrue(t, func() (bool, error) {
  1602  		server.mu.Lock()
  1603  		defer server.mu.Unlock()
  1604  		if len(server.conns) == 0 {
  1605  			return true, fmt.Errorf("timed out while waiting for server transport to be created")
  1606  		}
  1607  		return false, nil
  1608  	})
  1609  	var st *http2Server
  1610  	server.mu.Lock()
  1611  	for k := range server.conns {
  1612  		st = k.(*http2Server)
  1613  	}
  1614  	server.mu.Unlock()
  1615  
  1616  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1617  	defer cancel()
  1618  	const numStreams = 5
  1619  	clientStreams := make([]*Stream, numStreams)
  1620  	for i := 0; i < numStreams; i++ {
  1621  		var err error
  1622  		clientStreams[i], err = client.NewStream(ctx, &CallHdr{})
  1623  		if err != nil {
  1624  			t.Fatalf("Failed to create stream. Err: %v", err)
  1625  		}
  1626  	}
  1627  	var wg sync.WaitGroup
  1628  	// For each stream send pingpong messages to the server.
  1629  	for _, stream := range clientStreams {
  1630  		wg.Add(1)
  1631  		go func(stream *Stream) {
  1632  			defer wg.Done()
  1633  			buf := make([]byte, msgSize+5)
  1634  			buf[0] = byte(0)
  1635  			binary.BigEndian.PutUint32(buf[1:], uint32(msgSize))
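			// The 5 bytes prepended above are the standard gRPC message frame
			// header: one compression flag byte (0 = uncompressed) followed by
			// the payload length as a big-endian uint32.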
  1636  			opts := Options{}
  1637  			header := make([]byte, 5)
  1638  			for i := 1; i <= 5; i++ {
  1639  				if err := client.Write(stream, nil, buf, &opts); err != nil {
  1640  					t.Errorf("Error on client while writing message %v on stream %v: %v", i, stream.id, err)
  1641  					return
  1642  				}
  1643  				if _, err := stream.Read(header); err != nil {
  1644  					t.Errorf("Error on client while reading data frame header %v on stream %v: %v", i, stream.id, err)
  1645  					return
  1646  				}
  1647  				sz := binary.BigEndian.Uint32(header[1:])
  1648  				recvMsg := make([]byte, int(sz))
  1649  				if _, err := stream.Read(recvMsg); err != nil {
  1650  					t.Errorf("Error on client while reading data %v on stream %v: %v", i, stream.id, err)
  1651  					return
  1652  				}
  1653  				if len(recvMsg) != msgSize {
  1654  					t.Errorf("Length of message %v received by client on stream %v: %v, want: %v", i, stream.id, len(recvMsg), msgSize)
  1655  					return
  1656  				}
  1657  			}
  1658  			t.Logf("stream %v done with pingpongs", stream.id)
  1659  		}(stream)
  1660  	}
  1661  	wg.Wait()
  1662  	serverStreams := map[uint32]*Stream{}
  1663  	loopyClientStreams := map[uint32]*outStream{}
  1664  	loopyServerStreams := map[uint32]*outStream{}
  1665  	// Get all the streams from server reader and writer and client writer.
  1666  	st.mu.Lock()
  1667  	client.mu.Lock()
  1668  	for _, stream := range clientStreams {
  1669  		id := stream.id
  1670  		serverStreams[id] = st.activeStreams[id]
  1671  		loopyServerStreams[id] = st.loopy.estdStreams[id]
  1672  		loopyClientStreams[id] = client.loopy.estdStreams[id]
  1673  
  1674  	}
  1675  	client.mu.Unlock()
  1676  	st.mu.Unlock()
  1677  	// Close all streams
  1678  	for _, stream := range clientStreams {
  1679  		client.Write(stream, nil, nil, &Options{Last: true})
  1680  		if _, err := stream.Read(make([]byte, 5)); err != io.EOF {
  1681  			t.Fatalf("Client expected an EOF from the server. Got: %v", err)
  1682  		}
  1683  	}
  1684  	// Close down both server and client so that their internals can be read without data
  1685  	// races.
  1686  	client.Close(errors.New("closed manually by test"))
  1687  	st.Close(errors.New("closed manually by test"))
  1688  	<-st.readerDone
  1689  	<-st.loopyWriterDone
  1690  	<-client.readerDone
  1691  	<-client.writerDone
  1692  	for _, cstream := range clientStreams {
  1693  		id := cstream.id
  1694  		sstream := serverStreams[id]
  1695  		loopyServerStream := loopyServerStreams[id]
  1696  		loopyClientStream := loopyClientStreams[id]
  1697  		if loopyServerStream == nil {
  1698  			t.Fatalf("Unexpected nil loopyServerStream")
  1699  		}
  1700  		// Check stream flow control.
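		// The per-stream invariant: the receiver's remaining inflow window
		// (limit + delta - pendingData - pendingUpdate) must equal the sender's
		// remaining send window (oiws - bytesOutStanding). For example, with a
		// 64KB window and 16KB sent but not yet re-credited via WINDOW_UPDATE,
		// both sides compute 48KB.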
  1701  		if int(cstream.fc.limit+cstream.fc.delta-cstream.fc.pendingData-cstream.fc.pendingUpdate) != int(st.loopy.oiws)-loopyServerStream.bytesOutStanding {
  1702  			t.Fatalf("Account mismatch: client stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != server outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", cstream.fc.limit, cstream.fc.delta, cstream.fc.pendingData, cstream.fc.pendingUpdate, st.loopy.oiws, loopyServerStream.bytesOutStanding)
  1703  		}
  1704  		if int(sstream.fc.limit+sstream.fc.delta-sstream.fc.pendingData-sstream.fc.pendingUpdate) != int(client.loopy.oiws)-loopyClientStream.bytesOutStanding {
  1705  			t.Fatalf("Account mismatch: server stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != client outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", sstream.fc.limit, sstream.fc.delta, sstream.fc.pendingData, sstream.fc.pendingUpdate, client.loopy.oiws, loopyClientStream.bytesOutStanding)
  1706  		}
  1707  	}
  1708  	// Check transport flow control.
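	// At the connection level, the receiver's window (fc.limit) must split into
	// bytes received but not yet acknowledged (fc.unacked) plus the peer's
	// remaining connection-level send quota (loopy.sendQuota).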
  1709  	if client.fc.limit != client.fc.unacked+st.loopy.sendQuota {
  1710  		t.Fatalf("Account mismatch: client transport inflow(%d) != client unacked(%d) + server sendQuota(%d)", client.fc.limit, client.fc.unacked, st.loopy.sendQuota)
  1711  	}
  1712  	if st.fc.limit != st.fc.unacked+client.loopy.sendQuota {
  1713  		t.Fatalf("Account mismatch: server transport inflow(%d) != server unacked(%d) + client sendQuota(%d)", st.fc.limit, st.fc.unacked, client.loopy.sendQuota)
  1714  	}
  1715  }
  1716  
  1717  func waitWhileTrue(t *testing.T, condition func() (bool, error)) {
  1718  	var (
  1719  		wait bool
  1720  		err  error
  1721  	)
  1722  	timer := time.NewTimer(time.Second * 5)
  1723  	for {
  1724  		wait, err = condition()
  1725  		if wait {
  1726  			select {
  1727  			case <-timer.C:
  1728  				t.Fatal(err)
  1729  			default:
  1730  				time.Sleep(50 * time.Millisecond)
  1731  				continue
  1732  			}
  1733  		}
  1734  		if !timer.Stop() {
  1735  			<-timer.C
  1736  		}
  1737  		break
  1738  	}
  1739  }
  1740  
  1741  // If any error occurs on a call to Stream.Read, future calls
  1742  // should continue to return that same error.
  1743  func (s) TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) {
  1744  	testRecvBuffer := newRecvBuffer()
  1745  	s := &Stream{
  1746  		ctx:         context.Background(),
  1747  		buf:         testRecvBuffer,
  1748  		requestRead: func(int) {},
  1749  	}
  1750  	s.trReader = &transportReader{
  1751  		reader: &recvBufferReader{
  1752  			ctx:        s.ctx,
  1753  			ctxDone:    s.ctx.Done(),
  1754  			recv:       s.buf,
  1755  			freeBuffer: func(*bytes.Buffer) {},
  1756  		},
  1757  		windowHandler: func(int) {},
  1758  	}
  1759  	testData := make([]byte, 1)
  1760  	testData[0] = 5
  1761  	testBuffer := bytes.NewBuffer(testData)
  1762  	testErr := errors.New("test error")
  1763  	s.write(recvMsg{buffer: testBuffer, err: testErr})
  1764  
  1765  	inBuf := make([]byte, 1)
  1766  	actualCount, actualErr := s.Read(inBuf)
  1767  	if actualCount != 0 {
  1768  		t.Errorf("actualCount, _ := s.Read(_) differs; want 0; got %v", actualCount)
  1769  	}
  1770  	if actualErr.Error() != testErr.Error() {
  1771  		t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error())
  1772  	}
  1773  
  1774  	s.write(recvMsg{buffer: testBuffer, err: nil})
  1775  	s.write(recvMsg{buffer: testBuffer, err: errors.New("different error from first")})
  1776  
  1777  	for i := 0; i < 2; i++ {
  1778  		inBuf := make([]byte, 1)
  1779  		actualCount, actualErr := s.Read(inBuf)
  1780  		if actualCount != 0 {
  1781  			t.Errorf("actualCount, _ := s.Read(_) differs; want %v; got %v", 0, actualCount)
  1782  		}
  1783  		if actualErr.Error() != testErr.Error() {
  1784  			t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error())
  1785  		}
  1786  	}
  1787  }
  1788  
  1789  // TestHeadersCausingStreamError tests headers that should cause a stream protocol
  1790  // error, which would end up with a RST_STREAM being sent to the client and also
  1791  // the server closing the stream.
  1792  func (s) TestHeadersCausingStreamError(t *testing.T) {
  1793  	tests := []struct {
  1794  		name    string
  1795  		headers []struct {
  1796  			name   string
  1797  			values []string
  1798  		}
  1799  	}{
  1800  		// "Transports must consider requests containing the Connection header
  1801  		// as malformed" - A41 Malformed requests map to a stream error of type
  1802  		// PROTOCOL_ERROR.
  1803  		{
  1804  			name: "Connection header present",
  1805  			headers: []struct {
  1806  				name   string
  1807  				values []string
  1808  			}{
  1809  				{name: ":method", values: []string{"POST"}},
  1810  				{name: ":path", values: []string{"foo"}},
  1811  				{name: ":authority", values: []string{"localhost"}},
  1812  				{name: "content-type", values: []string{"application/grpc"}},
  1813  				{name: "connection", values: []string{"not-supported"}},
  1814  			},
  1815  		},
  1816  		// multiple :authority or multiple Host headers would make the eventual
  1817  		// :authority ambiguous as per A41. Since these headers won't have a
  1818  		// content-type that corresponds to a grpc-client, the server should
  1819  		// simply write a RST_STREAM to the wire.
  1820  		{
  1821  			// Note: multiple :authority headers are rejected by the framer
  1822  			// itself, which causes a stream error. The headers therefore never
  1823  			// reach the corresponding check in operateHeaders, but the server
  1824  			// transport still sends a stream error either way.
  1825  			name: "Multiple authority headers",
  1826  			headers: []struct {
  1827  				name   string
  1828  				values []string
  1829  			}{
  1830  				{name: ":method", values: []string{"POST"}},
  1831  				{name: ":path", values: []string{"foo"}},
  1832  				{name: ":authority", values: []string{"localhost", "localhost2"}},
  1833  				{name: "host", values: []string{"localhost"}},
  1834  			},
  1835  		},
  1836  	}
  1837  	for _, test := range tests {
  1838  		t.Run(test.name, func(t *testing.T) {
  1839  			server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
  1840  			defer server.stop()
  1841  			// Dial the server directly rather than through http2_client.go, so
  1842  			// the test fully controls which headers are sent on the wire.
  1843  			mconn, err := net.Dial("tcp", server.lis.Addr().String())
  1844  			if err != nil {
  1845  				t.Fatalf("Client failed to dial: %v", err)
  1846  			}
  1847  			defer mconn.Close()
  1848  
  1849  			if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) {
  1850  				t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, <nil>", n, err, len(clientPreface))
  1851  			}
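			// clientPreface is the fixed HTTP/2 connection preface
			// ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"), which a server expects
			// before any frames, so it must be written first.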
  1852  
  1853  			framer := http2.NewFramer(mconn, mconn)
  1854  			if err := framer.WriteSettings(); err != nil {
  1855  				t.Fatalf("Error while writing settings: %v", err)
  1856  			}
  1857  
  1858  			// The result channel indicates that the reader received an RST_STREAM
  1859  			// frame from the server; an error is sent on it for any other frame.
  1860  			result := testutils.NewChannel()
  1861  
  1862  			// Launch a reader goroutine.
  1863  			go func() {
  1864  				for {
  1865  					frame, err := framer.ReadFrame()
  1866  					if err != nil {
  1867  						return
  1868  					}
  1869  					switch frame := frame.(type) {
  1870  					case *http2.SettingsFrame:
  1871  						// Do nothing. A settings frame is expected from server preface.
  1872  					case *http2.RSTStreamFrame:
  1873  						if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeProtocol {
  1874  							// The client created only a single stream, so the RST_STREAM must be for stream 1.
  1875  							result.Send(fmt.Errorf("RST_STREAM received with streamID: %d and code: %v, want streamID: 1 and code: %v", frame.Header().StreamID, http2.ErrCode(frame.ErrCode), http2.ErrCodeProtocol))
  1876  						}
  1877  						// Records that client successfully received RST Stream frame.
  1878  						result.Send(nil)
  1879  						return
  1880  					default:
  1881  						// The server should send nothing but a single RST Stream frame.
  1882  						result.Send(errors.New("the client received a frame other than RST Stream"))
  1883  					}
  1884  				}
  1885  			}()
  1886  
  1887  			var buf bytes.Buffer
  1888  			henc := hpack.NewEncoder(&buf)
  1889  
  1890  			// Build the headers deterministically so the request conforms to the
  1891  			// gRPC over HTTP/2 spec.
  1892  			for _, header := range test.headers {
  1893  				for _, value := range header.values {
  1894  					if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil {
  1895  						t.Fatalf("Error while encoding header: %v", err)
  1896  					}
  1897  				}
  1898  			}
  1899  
  1900  			if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil {
  1901  				t.Fatalf("Error while writing headers: %v", err)
  1902  			}
  1903  			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1904  			defer cancel()
  1905  			r, err := result.Receive(ctx)
  1906  			if err != nil {
  1907  				t.Fatalf("Error receiving from channel: %v", err)
  1908  			}
  1909  			if r != nil {
  1910  				t.Fatalf("want nil, got %v", r)
  1911  			}
  1912  		})
  1913  	}
  1914  }
  1915  
  1916  // TestHeadersHTTPStatusGRPCStatus tests requests with certain headers get a
  1917  // certain HTTP and gRPC status back.
  1918  func (s) TestHeadersHTTPStatusGRPCStatus(t *testing.T) {
  1919  	tests := []struct {
  1920  		name    string
  1921  		headers []struct {
  1922  			name   string
  1923  			values []string
  1924  		}
  1925  		httpStatusWant  string
  1926  		grpcStatusWant  string
  1927  		grpcMessageWant string
  1928  	}{
  1929  		// Note: multiple :authority headers are rejected by the framer itself,
  1930  		// which causes a stream error. The headers therefore never reach the
  1931  		// check in operateHeaders that would otherwise send a grpc-status back
  1932  		// to the client.
  1933  
  1934  		// Multiple :authority or multiple Host headers would make the eventual
  1935  		// :authority ambiguous, as per A41, and this takes precedence even over
  1936  		// the fact that a request is not gRPC. Any request with multiple host
  1937  		// headers should therefore be rejected with HTTP status 400 and gRPC
  1938  		// status Internal, regardless of whether the client is speaking gRPC
  1939  		// or not.
  1940  		{
  1941  			name: "Multiple host headers non grpc",
  1942  			headers: []struct {
  1943  				name   string
  1944  				values []string
  1945  			}{
  1946  				{name: ":method", values: []string{"POST"}},
  1947  				{name: ":path", values: []string{"foo"}},
  1948  				{name: ":authority", values: []string{"localhost"}},
  1949  				{name: "host", values: []string{"localhost", "localhost2"}},
  1950  			},
  1951  			httpStatusWant:  "400",
  1952  			grpcStatusWant:  "13",
  1953  			grpcMessageWant: "both must only have 1 value as per HTTP/2 spec",
  1954  		},
  1955  		{
  1956  			name: "Multiple host headers grpc",
  1957  			headers: []struct {
  1958  				name   string
  1959  				values []string
  1960  			}{
  1961  				{name: ":method", values: []string{"POST"}},
  1962  				{name: ":path", values: []string{"foo"}},
  1963  				{name: ":authority", values: []string{"localhost"}},
  1964  				{name: "content-type", values: []string{"application/grpc"}},
  1965  				{name: "host", values: []string{"localhost", "localhost2"}},
  1966  			},
  1967  			httpStatusWant:  "400",
  1968  			grpcStatusWant:  "13",
  1969  			grpcMessageWant: "both must only have 1 value as per HTTP/2 spec",
  1970  		},
  1971  		// If the client sends an HTTP/2 request with a :method header with a
  1972  		// value other than POST, as specified in the gRPC over HTTP/2
  1973  		// specification, the server should fail the RPC.
  1974  		{
  1975  			name: "Client Sending Wrong Method",
  1976  			headers: []struct {
  1977  				name   string
  1978  				values []string
  1979  			}{
  1980  				{name: ":method", values: []string{"PUT"}},
  1981  				{name: ":path", values: []string{"foo"}},
  1982  				{name: ":authority", values: []string{"localhost"}},
  1983  				{name: "content-type", values: []string{"application/grpc"}},
  1984  			},
  1985  			httpStatusWant:  "405",
  1986  			grpcStatusWant:  "13",
  1987  			grpcMessageWant: "which should be POST",
  1988  		},
  1989  		{
  1990  			name: "Client Sending Wrong Content-Type",
  1991  			headers: []struct {
  1992  				name   string
  1993  				values []string
  1994  			}{
  1995  				{name: ":method", values: []string{"POST"}},
  1996  				{name: ":path", values: []string{"foo"}},
  1997  				{name: ":authority", values: []string{"localhost"}},
  1998  				{name: "content-type", values: []string{"application/json"}},
  1999  			},
  2000  			httpStatusWant:  "415",
  2001  			grpcStatusWant:  "3",
  2002  			grpcMessageWant: `invalid gRPC request content-type "application/json"`,
  2003  		},
  2004  		{
  2005  			name: "Client Sending Bad Timeout",
  2006  			headers: []struct {
  2007  				name   string
  2008  				values []string
  2009  			}{
  2010  				{name: ":method", values: []string{"POST"}},
  2011  				{name: ":path", values: []string{"foo"}},
  2012  				{name: ":authority", values: []string{"localhost"}},
  2013  				{name: "content-type", values: []string{"application/grpc"}},
  2014  				{name: "grpc-timeout", values: []string{"18f6n"}},
  2015  			},
  2016  			httpStatusWant:  "400",
  2017  			grpcStatusWant:  "13",
  2018  			grpcMessageWant: "malformed grpc-timeout",
  2019  		},
  2020  		{
  2021  			name: "Client Sending Bad Binary Header",
  2022  			headers: []struct {
  2023  				name   string
  2024  				values []string
  2025  			}{
  2026  				{name: ":method", values: []string{"POST"}},
  2027  				{name: ":path", values: []string{"foo"}},
  2028  				{name: ":authority", values: []string{"localhost"}},
  2029  				{name: "content-type", values: []string{"application/grpc"}},
  2030  				{name: "foobar-bin", values: []string{"X()3e@#$-"}},
  2031  			},
  2032  			httpStatusWant:  "400",
  2033  			grpcStatusWant:  "13",
  2034  			grpcMessageWant: `header "foobar-bin": illegal base64 data`,
  2035  		},
  2036  	}
  2037  	for _, test := range tests {
  2038  		t.Run(test.name, func(t *testing.T) {
  2039  			server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
  2040  			defer server.stop()
  2041  			// Dial the server directly rather than through http2_client.go, so
  2042  			// the test fully controls which headers are sent on the wire.
  2043  			mconn, err := net.Dial("tcp", server.lis.Addr().String())
  2044  			if err != nil {
  2045  				t.Fatalf("Client failed to dial: %v", err)
  2046  			}
  2047  			defer mconn.Close()
  2048  
  2049  			if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) {
  2050  				t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, <nil>", n, err, len(clientPreface))
  2051  			}
  2052  
  2053  			framer := http2.NewFramer(mconn, mconn)
  2054  			framer.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
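			// With ReadMetaHeaders set, framer.ReadFrame merges HEADERS and
			// CONTINUATION frames and HPACK-decodes the header block, returning
			// a *http2.MetaHeadersFrame with the decoded fields.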
  2055  			if err := framer.WriteSettings(); err != nil {
  2056  				t.Fatalf("Error while writing settings: %v", err)
  2057  			}
  2058  
  2059  			// The result channel indicates that the reader received a headers
  2060  			// frame with the desired gRPC status and message from the server; an
  2061  			// error is sent on it if any other frame is received.
  2062  			result := testutils.NewChannel()
  2063  
  2064  			// Launch a reader goroutine.
  2065  			go func() {
  2066  				for {
  2067  					frame, err := framer.ReadFrame()
  2068  					if err != nil {
  2069  						return
  2070  					}
  2071  					switch frame := frame.(type) {
  2072  					case *http2.SettingsFrame:
  2073  						// Do nothing. A settings frame is expected from server preface.
  2074  					case *http2.MetaHeadersFrame:
  2075  						var httpStatus, grpcStatus, grpcMessage string
  2076  						for _, header := range frame.Fields {
  2077  							if header.Name == ":status" {
  2078  								httpStatus = header.Value
  2079  							}
  2080  							if header.Name == "grpc-status" {
  2081  								grpcStatus = header.Value
  2082  							}
  2083  							if header.Name == "grpc-message" {
  2084  								grpcMessage = header.Value
  2085  							}
  2086  						}
  2087  						if httpStatus != test.httpStatusWant {
  2088  							result.Send(fmt.Errorf("incorrect HTTP Status got %v, want %v", httpStatus, test.httpStatusWant))
  2089  							return
  2090  						}
  2091  						if grpcStatus != test.grpcStatusWant { // grpc status code internal
  2092  							result.Send(fmt.Errorf("incorrect gRPC Status got %v, want %v", grpcStatus, test.grpcStatusWant))
  2093  							return
  2094  						}
  2095  						if !strings.Contains(grpcMessage, test.grpcMessageWant) {
  2096  							result.Send(fmt.Errorf("incorrect gRPC message, want %q got %q", test.grpcMessageWant, grpcMessage))
  2097  							return
  2098  						}
  2099  
  2100  						// Records that client successfully received a HeadersFrame
  2101  						// with expected Trailers-Only response.
  2102  						result.Send(nil)
  2103  						return
  2104  					default:
  2105  						// The server should send only a single SETTINGS frame and a single headers frame.
  2106  						result.Send(errors.New("the client received a frame other than Settings or Headers"))
  2107  					}
  2108  				}
  2109  			}()
  2110  
  2111  			var buf bytes.Buffer
  2112  			henc := hpack.NewEncoder(&buf)
  2113  
  2114  			// Build the headers deterministically so the request conforms to the
  2115  			// gRPC over HTTP/2 spec.
  2116  			for _, header := range test.headers {
  2117  				for _, value := range header.values {
  2118  					if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil {
  2119  						t.Fatalf("Error while encoding header: %v", err)
  2120  					}
  2121  				}
  2122  			}
  2123  
  2124  			if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil {
  2125  				t.Fatalf("Error while writing headers: %v", err)
  2126  			}
  2127  			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2128  			defer cancel()
  2129  			r, err := result.Receive(ctx)
  2130  			if err != nil {
  2131  				t.Fatalf("Error receiving from channel: %v", err)
  2132  			}
  2133  			if r != nil {
  2134  				t.Fatalf("want nil, got %v", r)
  2135  			}
  2136  		})
  2137  	}
  2138  }
  2139  
  2140  func (s) TestWriteHeaderConnectionError(t *testing.T) {
  2141  	server, client, cancel := setUp(t, 0, notifyCall)
  2142  	defer cancel()
  2143  	defer server.stop()
  2144  
  2145  	waitWhileTrue(t, func() (bool, error) {
  2146  		server.mu.Lock()
  2147  		defer server.mu.Unlock()
  2148  
  2149  		if len(server.conns) == 0 {
  2150  			return true, fmt.Errorf("timed out while waiting for connection to be created on the server")
  2151  		}
  2152  		return false, nil
  2153  	})
  2154  
  2155  	server.mu.Lock()
  2156  
  2157  	if len(server.conns) != 1 {
  2158  		t.Fatalf("Server has %d connections from the client, want 1", len(server.conns))
  2159  	}
  2160  
  2161  	// Get the server transport for the connection to the client.
  2162  	var serverTransport *http2Server
  2163  	for k := range server.conns {
  2164  		serverTransport = k.(*http2Server)
  2165  	}
  2166  	notifyChan := make(chan struct{})
  2167  	server.h.notify = notifyChan
  2168  	server.mu.Unlock()
  2169  
  2170  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2171  	defer cancel()
  2172  	cstream, err := client.NewStream(ctx, &CallHdr{})
  2173  	if err != nil {
  2174  		t.Fatalf("Client failed to create first stream. Err: %v", err)
  2175  	}
  2176  
  2177  	<-notifyChan // Wait for server stream to be established.
  2178  	var sstream *Stream
  2179  	// Access stream on the server.
  2180  	serverTransport.mu.Lock()
  2181  	for _, v := range serverTransport.activeStreams {
  2182  		if v.id == cstream.id {
  2183  			sstream = v
  2184  		}
  2185  	}
  2186  	serverTransport.mu.Unlock()
  2187  	if sstream == nil {
  2188  		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream.id)
  2189  	}
  2190  
  2191  	client.Close(fmt.Errorf("closed manually by test"))
  2192  
  2193  	// Wait for server transport to be closed.
  2194  	<-serverTransport.done
  2195  
  2196  	// Write header on a closed server transport.
  2197  	err = serverTransport.WriteHeader(sstream, metadata.MD{})
  2198  	st := status.Convert(err)
  2199  	if st.Code() != codes.Unavailable {
  2200  		t.Fatalf("WriteHeader() failed with status code %s, want %s", st.Code(), codes.Unavailable)
  2201  	}
  2202  }
  2203  
  2204  func (s) TestPingPong1B(t *testing.T) {
  2205  	runPingPongTest(t, 1)
  2206  }
  2207  
  2208  func (s) TestPingPong1KB(t *testing.T) {
  2209  	runPingPongTest(t, 1024)
  2210  }
  2211  
  2212  func (s) TestPingPong64KB(t *testing.T) {
  2213  	runPingPongTest(t, 65536)
  2214  }
  2215  
  2216  func (s) TestPingPong1MB(t *testing.T) {
  2217  	runPingPongTest(t, 1048576)
  2218  }
  2219  
  2220  // This is a stress-test of flow control logic.
  2221  func runPingPongTest(t *testing.T, msgSize int) {
  2222  	server, client, cancel := setUp(t, 0, pingpong)
  2223  	defer cancel()
  2224  	defer server.stop()
  2225  	defer client.Close(fmt.Errorf("closed manually by test"))
  2226  	waitWhileTrue(t, func() (bool, error) {
  2227  		server.mu.Lock()
  2228  		defer server.mu.Unlock()
  2229  		if len(server.conns) == 0 {
  2230  			return true, fmt.Errorf("timed out while waiting for server transport to be created")
  2231  		}
  2232  		return false, nil
  2233  	})
  2234  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2235  	defer cancel()
  2236  	stream, err := client.NewStream(ctx, &CallHdr{})
  2237  	if err != nil {
  2238  		t.Fatalf("Failed to create stream. Err: %v", err)
  2239  	}
  2240  	msg := make([]byte, msgSize)
  2241  	outgoingHeader := make([]byte, 5)
  2242  	outgoingHeader[0] = byte(0)
  2243  	binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(msgSize))
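	// Same 5-byte gRPC frame header as in testFlowControlAccountCheck: a zero
	// compression flag followed by the big-endian payload length.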
  2244  	opts := &Options{}
  2245  	incomingHeader := make([]byte, 5)
  2246  
  2247  	ctx, cancel = context.WithTimeout(ctx, time.Second)
  2248  	defer cancel()
  2249  	for ctx.Err() == nil {
  2250  		if err := client.Write(stream, outgoingHeader, msg, opts); err != nil {
  2251  			t.Fatalf("Error on client while writing message. Err: %v", err)
  2252  		}
  2253  		if _, err := stream.Read(incomingHeader); err != nil {
  2254  			t.Fatalf("Error on client while reading data header. Err: %v", err)
  2255  		}
  2256  		sz := binary.BigEndian.Uint32(incomingHeader[1:])
  2257  		recvMsg := make([]byte, int(sz))
  2258  		if _, err := stream.Read(recvMsg); err != nil {
  2259  			t.Fatalf("Error on client while reading data. Err: %v", err)
  2260  		}
  2261  	}
  2262  
  2263  	client.Write(stream, nil, nil, &Options{Last: true})
  2264  	if _, err := stream.Read(incomingHeader); err != io.EOF {
  2265  		t.Fatalf("Client expected EOF from the server. Got: %v", err)
  2266  	}
  2267  }
  2268  
  2269  type tableSizeLimit struct {
  2270  	mu     sync.Mutex
  2271  	limits []uint32
  2272  }
  2273  
  2274  func (t *tableSizeLimit) add(limit uint32) {
  2275  	t.mu.Lock()
  2276  	t.limits = append(t.limits, limit)
  2277  	t.mu.Unlock()
  2278  }
  2279  
  2280  func (t *tableSizeLimit) getLen() int {
  2281  	t.mu.Lock()
  2282  	defer t.mu.Unlock()
  2283  	return len(t.limits)
  2284  }
  2285  
  2286  func (t *tableSizeLimit) getIndex(i int) uint32 {
  2287  	t.mu.Lock()
  2288  	defer t.mu.Unlock()
  2289  	return t.limits[i]
  2290  }
  2291  
  2292  func (s) TestHeaderTblSize(t *testing.T) {
  2293  	limits := &tableSizeLimit{}
  2294  	updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
  2295  		e.SetMaxDynamicTableSizeLimit(v)
  2296  		limits.add(v)
  2297  	}
  2298  	defer func() {
  2299  		updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
  2300  			e.SetMaxDynamicTableSizeLimit(v)
  2301  		}
  2302  	}()
  2303  
  2304  	server, ct, cancel := setUp(t, 0, normal)
  2305  	defer cancel()
  2306  	defer ct.Close(fmt.Errorf("closed manually by test"))
  2307  	defer server.stop()
  2308  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  2309  	defer ctxCancel()
  2310  	_, err := ct.NewStream(ctx, &CallHdr{})
  2311  	if err != nil {
  2312  		t.Fatalf("failed to open stream: %v", err)
  2313  	}
  2314  
  2315  	var svrTransport ServerTransport
  2316  	var i int
  2317  	for i = 0; i < 1000; i++ {
  2318  		server.mu.Lock()
  2319  		if len(server.conns) != 0 {
  2320  			server.mu.Unlock()
  2321  			break
  2322  		}
  2323  		server.mu.Unlock()
  2324  		time.Sleep(10 * time.Millisecond)
  2325  		continue
  2326  	}
  2327  	if i == 1000 {
  2328  		t.Fatalf("unable to create any server transport after 10s")
  2329  	}
  2330  
  2331  	for st := range server.conns {
  2332  		svrTransport = st
  2333  		break
  2334  	}
  2335  	svrTransport.(*http2Server).controlBuf.put(&outgoingSettings{
  2336  		ss: []http2.Setting{
  2337  			{
  2338  				ID:  http2.SettingHeaderTableSize,
  2339  				Val: uint32(100),
  2340  			},
  2341  		},
  2342  	})
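	// A SETTINGS_HEADER_TABLE_SIZE value advertised by one side caps the HPACK
	// dynamic table the peer's encoder may use (RFC 7540 §6.5.2); the client is
	// expected to apply it via updateHeaderTblSize, which the stub above records.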
  2343  
  2344  	for i = 0; i < 1000; i++ {
  2345  		if limits.getLen() != 1 {
  2346  			time.Sleep(10 * time.Millisecond)
  2347  			continue
  2348  		}
  2349  		if val := limits.getIndex(0); val != uint32(100) {
  2350  			t.Fatalf("expected limits[0] = 100, got %d", val)
  2351  		}
  2352  		break
  2353  	}
  2354  	if i == 1000 {
  2355  		t.Fatalf("expected len(limits) = 1 within 10s, got %d", limits.getLen())
  2356  	}
  2357  
  2358  	ct.controlBuf.put(&outgoingSettings{
  2359  		ss: []http2.Setting{
  2360  			{
  2361  				ID:  http2.SettingHeaderTableSize,
  2362  				Val: uint32(200),
  2363  			},
  2364  		},
  2365  	})
  2366  
  2367  	for i = 0; i < 1000; i++ { // Reuse the outer i so the timeout check below observes this loop.
  2368  		if limits.getLen() != 2 {
  2369  			time.Sleep(10 * time.Millisecond)
  2370  			continue
  2371  		}
  2372  		if val := limits.getIndex(1); val != uint32(200) {
  2373  			t.Fatalf("expected limits[1] = 200, got %d", val)
  2374  		}
  2375  		break
  2376  	}
  2377  	if i == 1000 {
  2378  		t.Fatalf("expected len(limits) = 2 within 10s, got %d", limits.getLen())
  2379  	}
  2380  }
  2381  
  2382  // attrTransportCreds is a transport credentials implementation that stores the
  2383  // Attributes from the ClientHandshakeInfo struct passed in the context, so the
  2384  // test can inspect them.
  2385  type attrTransportCreds struct {
  2386  	credentials.TransportCredentials
  2387  	attr *attributes.Attributes
  2388  }
  2389  
  2390  func (ac *attrTransportCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
  2391  	ai := credentials.ClientHandshakeInfoFromContext(ctx)
  2392  	ac.attr = ai.Attributes
  2393  	return rawConn, nil, nil
  2394  }
  2395  func (ac *attrTransportCreds) Info() credentials.ProtocolInfo {
  2396  	return credentials.ProtocolInfo{}
  2397  }
  2398  func (ac *attrTransportCreds) Clone() credentials.TransportCredentials {
  2399  	return nil
  2400  }
  2401  
  2402  // TestClientHandshakeInfo adds attributes to the resolver.Address passed to
  2403  // NewClientTransport and verifies that these attributes are received by the
  2404  // transport credential handshaker.
  2405  func (s) TestClientHandshakeInfo(t *testing.T) {
  2406  	server := setUpServerOnly(t, 0, &ServerConfig{}, pingpong)
  2407  	defer server.stop()
  2408  
  2409  	const (
  2410  		testAttrKey = "foo"
  2411  		testAttrVal = "bar"
  2412  	)
  2413  	addr := resolver.Address{
  2414  		Addr:       "localhost:" + server.port,
  2415  		Attributes: attributes.New(testAttrKey, testAttrVal),
  2416  	}
  2417  	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
  2418  	defer cancel()
  2419  	creds := &attrTransportCreds{}
  2420  
  2421  	copts := ConnectOptions{
  2422  		TransportCredentials: creds,
  2423  		ChannelzParentID:     channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil),
  2424  	}
  2425  	tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {})
  2426  	if err != nil {
  2427  		t.Fatalf("NewClientTransport(): %v", err)
  2428  	}
  2429  	defer tr.Close(fmt.Errorf("closed manually by test"))
  2430  
  2431  	wantAttr := attributes.New(testAttrKey, testAttrVal)
  2432  	if gotAttr := creds.attr; !cmp.Equal(gotAttr, wantAttr, cmp.AllowUnexported(attributes.Attributes{})) {
  2433  		t.Fatalf("received attributes %v in creds, want %v", gotAttr, wantAttr)
  2434  	}
  2435  }
  2436  
  2437  // TestClientHandshakeInfoDialer adds attributes to the resolver.Address passed to
  2438  // NewClientTransport and verifies that these attributes are received by a custom
  2439  // dialer.
  2440  func (s) TestClientHandshakeInfoDialer(t *testing.T) {
  2441  	server := setUpServerOnly(t, 0, &ServerConfig{}, pingpong)
  2442  	defer server.stop()
  2443  
  2444  	const (
  2445  		testAttrKey = "foo"
  2446  		testAttrVal = "bar"
  2447  	)
  2448  	addr := resolver.Address{
  2449  		Addr:       "localhost:" + server.port,
  2450  		Attributes: attributes.New(testAttrKey, testAttrVal),
  2451  	}
  2452  	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
  2453  	defer cancel()
  2454  
  2455  	var attr *attributes.Attributes
  2456  	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
  2457  		ai := credentials.ClientHandshakeInfoFromContext(ctx)
  2458  		attr = ai.Attributes
  2459  		return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
  2460  	}
  2461  
  2462  	copts := ConnectOptions{
  2463  		Dialer:           dialer,
  2464  		ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil),
  2465  	}
  2466  	tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {})
  2467  	if err != nil {
  2468  		t.Fatalf("NewClientTransport(): %v", err)
  2469  	}
  2470  	defer tr.Close(fmt.Errorf("closed manually by test"))
  2471  
  2472  	wantAttr := attributes.New(testAttrKey, testAttrVal)
  2473  	if gotAttr := attr; !cmp.Equal(gotAttr, wantAttr, cmp.AllowUnexported(attributes.Attributes{})) {
  2474  		t.Errorf("Received attributes %v in custom dialer, want %v", gotAttr, wantAttr)
  2475  	}
  2476  }
  2477  
  2478  func (s) TestClientDecodeHeaderStatusErr(t *testing.T) {
  2479  	testStream := func() *Stream {
  2480  		return &Stream{
  2481  			done:       make(chan struct{}),
  2482  			headerChan: make(chan struct{}),
  2483  			buf: &recvBuffer{
  2484  				c:  make(chan recvMsg),
  2485  				mu: sync.Mutex{},
  2486  			},
  2487  		}
  2488  	}
  2489  
  2490  	testClient := func(ts *Stream) *http2Client {
  2491  		return &http2Client{
  2492  			mu: sync.Mutex{},
  2493  			activeStreams: map[uint32]*Stream{
  2494  				0: ts,
  2495  			},
  2496  			controlBuf: &controlBuffer{
  2497  				ch:   make(chan struct{}),
  2498  				done: make(chan struct{}),
  2499  				list: &itemList{},
  2500  			},
  2501  		}
  2502  	}
  2503  
  2504  	for _, test := range []struct {
  2505  		name string
  2506  		// input
  2507  		metaHeaderFrame *http2.MetaHeadersFrame
  2508  		// output
  2509  		wantStatus *status.Status
  2510  	}{
  2511  		{
  2512  			name: "valid header",
  2513  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2514  				Fields: []hpack.HeaderField{
  2515  					{Name: "content-type", Value: "application/grpc"},
  2516  					{Name: "grpc-status", Value: "0"},
  2517  					{Name: ":status", Value: "200"},
  2518  				},
  2519  			},
  2520  			// no error
  2521  			wantStatus: status.New(codes.OK, ""),
  2522  		},
  2523  		{
  2524  			name: "missing content-type header",
  2525  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2526  				Fields: []hpack.HeaderField{
  2527  					{Name: "grpc-status", Value: "0"},
  2528  					{Name: ":status", Value: "200"},
  2529  				},
  2530  			},
  2531  			wantStatus: status.New(
  2532  				codes.Unknown,
  2533  				"malformed header: missing HTTP content-type",
  2534  			),
  2535  		},
  2536  		{
  2537  			name: "invalid grpc status header field",
  2538  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2539  				Fields: []hpack.HeaderField{
  2540  					{Name: "content-type", Value: "application/grpc"},
  2541  					{Name: "grpc-status", Value: "xxxx"},
  2542  					{Name: ":status", Value: "200"},
  2543  				},
  2544  			},
  2545  			wantStatus: status.New(
  2546  				codes.Internal,
  2547  				"transport: malformed grpc-status: strconv.ParseInt: parsing \"xxxx\": invalid syntax",
  2548  			),
  2549  		},
  2550  		{
  2551  			name: "invalid http content type",
  2552  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2553  				Fields: []hpack.HeaderField{
  2554  					{Name: "content-type", Value: "application/json"},
  2555  				},
  2556  			},
  2557  			wantStatus: status.New(
  2558  				codes.Internal,
  2559  				"malformed header: missing HTTP status; transport: received unexpected content-type \"application/json\"",
  2560  			),
  2561  		},
  2562  		{
  2563  			name: "http fallback and invalid http status",
  2564  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2565  				Fields: []hpack.HeaderField{
  2566  					// No content type provided then fallback into handling http error.
  2567  					{Name: ":status", Value: "xxxx"},
  2568  				},
  2569  			},
  2570  			wantStatus: status.New(
  2571  				codes.Internal,
  2572  				"transport: malformed http-status: strconv.ParseInt: parsing \"xxxx\": invalid syntax",
  2573  			),
  2574  		},
  2575  		{
  2576  			name: "http2 frame size exceeds",
  2577  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2578  				Fields:    nil,
  2579  				Truncated: true,
  2580  			},
  2581  			wantStatus: status.New(
  2582  				codes.Internal,
  2583  				"peer header list size exceeded limit",
  2584  			),
  2585  		},
  2586  		{
  2587  			name: "bad status in grpc mode",
  2588  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2589  				Fields: []hpack.HeaderField{
  2590  					{Name: "content-type", Value: "application/grpc"},
  2591  					{Name: "grpc-status", Value: "0"},
  2592  					{Name: ":status", Value: "504"},
  2593  				},
  2594  			},
  2595  			wantStatus: status.New(
  2596  				codes.Unavailable,
  2597  				"unexpected HTTP status code received from server: 504 (Gateway Timeout)",
  2598  			),
  2599  		},
  2600  		{
  2601  			name: "missing http status",
  2602  			metaHeaderFrame: &http2.MetaHeadersFrame{
  2603  				Fields: []hpack.HeaderField{
  2604  					{Name: "content-type", Value: "application/grpc"},
  2605  				},
  2606  			},
  2607  			wantStatus: status.New(
  2608  				codes.Internal,
  2609  				"malformed header: missing HTTP status",
  2610  			),
  2611  		},
  2612  	} {
  2613  
  2614  		t.Run(test.name, func(t *testing.T) {
  2615  			ts := testStream()
  2616  			s := testClient(ts)
  2617  
  2618  			test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{
  2619  				FrameHeader: http2.FrameHeader{
  2620  					StreamID: 0,
  2621  				},
  2622  			}
  2623  
  2624  			s.operateHeaders(test.metaHeaderFrame)
  2625  
  2626  			got := ts.status
  2627  			want := test.wantStatus
  2628  			if got.Code() != want.Code() || got.Message() != want.Message() {
  2629  				t.Fatalf("operateHeaders(%v); status = \ngot: %s\nwant: %s", test.metaHeaderFrame, got, want)
  2630  			}
  2631  		})
  2632  		t.Run(fmt.Sprintf("%s-end_stream", test.name), func(t *testing.T) {
  2633  			ts := testStream()
  2634  			s := testClient(ts)
  2635  
  2636  			test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{
  2637  				FrameHeader: http2.FrameHeader{
  2638  					StreamID: 0,
  2639  					Flags:    http2.FlagHeadersEndStream,
  2640  				},
  2641  			}
  2642  
  2643  			s.operateHeaders(test.metaHeaderFrame)
  2644  
  2645  			got := ts.status
  2646  			want := test.wantStatus
  2647  			if got.Code() != want.Code() || got.Message() != want.Message() {
  2648  				t.Fatalf("operateHeaders(%v); status = \ngot: %s\nwant: %s", test.metaHeaderFrame, got, want)
  2649  			}
  2650  		})
  2651  	}
  2652  }
  2653  
  2654  func TestConnectionError_Unwrap(t *testing.T) {
  2655  	err := connectionErrorf(false, os.ErrNotExist, "unwrap me")
  2656  	if !errors.Is(err, os.ErrNotExist) {
  2657  		t.Error("ConnectionError does not unwrap")
  2658  	}
  2659  }