github.com/cloudwego/kitex@v0.9.0/pkg/remote/trans/nphttp2/grpc/transport_test.go

     1  /*
     2   *
     3   * Copyright 2014 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   * This file may have been modified by CloudWeGo authors. All CloudWeGo
    18   * Modifications are Copyright 2021 CloudWeGo Authors.
    19   */
    20  
    21  package grpc
    22  
    23  import (
    24  	"bytes"
    25  	"context"
    26  	"crypto/tls"
    27  	"encoding/binary"
    28  	"errors"
    29  	"fmt"
    30  	"io"
    31  	"math"
    32  	"net"
    33  	"runtime"
    34  	"strconv"
    35  	"strings"
    36  	"sync"
    37  	"testing"
    38  	"time"
    39  
    40  	"github.com/cloudwego/netpoll"
    41  	"golang.org/x/net/http2"
    42  	"golang.org/x/net/http2/hpack"
    43  
    44  	"github.com/cloudwego/kitex/internal/test"
    45  	"github.com/cloudwego/kitex/pkg/remote/trans/nphttp2/codes"
    46  	"github.com/cloudwego/kitex/pkg/remote/trans/nphttp2/grpc/grpcframe"
    47  	"github.com/cloudwego/kitex/pkg/remote/trans/nphttp2/grpc/testutils"
    48  	"github.com/cloudwego/kitex/pkg/remote/trans/nphttp2/status"
    49  )
    50  
    51  type server struct {
    52  	lis        netpoll.Listener
    53  	eventLoop  netpoll.EventLoop
    54  	port       string
    55  	startedErr chan error // nil on success, or the error from server startup
    56  	mu         sync.Mutex
    57  	conns      map[ServerTransport]bool
    58  	h          *testStreamHandler
    59  	ready      chan struct{}
    60  }
    61  
    62  var (
    63  	expectedRequest            = []byte("ping")
    64  	expectedResponse           = []byte("pong")
    65  	expectedRequestLarge       = make([]byte, initialWindowSize*2)
    66  	expectedResponseLarge      = make([]byte, initialWindowSize*2)
    67  	expectedInvalidHeaderField = "invalid/content-type"
    68  )
    69  
    70  func init() {
    71  	expectedRequestLarge[0] = 'g'
    72  	expectedRequestLarge[len(expectedRequestLarge)-1] = 'r'
    73  	expectedResponseLarge[0] = 'p'
    74  	expectedResponseLarge[len(expectedResponseLarge)-1] = 'c'
    75  }
    76  
    77  type testStreamHandler struct {
    78  	t           *http2Server
    79  	notify      chan struct{}
    80  	getNotified chan struct{}
    81  }
    82  
    83  type hType int
    84  
    85  const (
    86  	normal                 hType = iota // read a request and write the canned response plus an OK status
    87  	suspended                           // register incoming streams but never handle them
    88  	notifyCall                          // close h.notify as soon as a stream arrives
    89  	misbehaved                          // deliberately violate the client's flow-control windows
    90  	encodingRequiredStatus              // write a status whose message requires percent-encoding
    91  	invalidHeaderField                  // respond with an invalid content-type header field
    92  	delayRead                           // delay reads so the peer halts on stream-level flow control
    93  	pingpong                            // echo length-prefixed messages until EOF
    94  )
    95  
    96  func (h *testStreamHandler) handleStreamAndNotify(s *Stream) {
    97  	if h.notify == nil {
    98  		return
    99  	}
   100  	go func() {
   101  		select {
   102  		case <-h.notify:
   103  		default:
   104  			close(h.notify)
   105  		}
   106  	}()
   107  }
   108  
   109  func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) {
   110  	req := expectedRequest
   111  	resp := expectedResponse
   112  	if s.Method() == "foo.Large" {
   113  		req = expectedRequestLarge
   114  		resp = expectedResponseLarge
   115  	}
   116  	p := make([]byte, len(req))
   117  	_, err := s.Read(p)
   118  	if err != nil {
   119  		return
   120  	}
   121  	if !bytes.Equal(p, req) {
   122  		t.Errorf("handleStream got %v, want %v", p, req)
   123  		h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
   124  		return
   125  	}
   126  	// send a response back to the client.
   127  	h.t.Write(s, nil, resp, &Options{})
   128  	// send the trailer to end the stream.
   129  	h.t.WriteStatus(s, status.New(codes.OK, ""))
   130  }
   131  
   132  func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) {
   133  	header := make([]byte, 5)
   134  	for {
   135  		if _, err := s.Read(header); err != nil {
   136  			if err == io.EOF {
   137  				h.t.WriteStatus(s, status.New(codes.OK, ""))
   138  				return
   139  			}
   140  			t.Errorf("Error on server while reading data header: %v", err)
   141  			h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
   142  			return
   143  		}
   144  		sz := binary.BigEndian.Uint32(header[1:])
   145  		msg := make([]byte, int(sz))
   146  		if _, err := s.Read(msg); err != nil {
   147  			t.Errorf("Error on server while reading message: %v", err)
   148  			h.t.WriteStatus(s, status.New(codes.Internal, "panic"))
   149  			return
   150  		}
   151  		buf := make([]byte, sz+5)
   152  		buf[0] = byte(0)
   153  		binary.BigEndian.PutUint32(buf[1:], uint32(sz))
   154  		copy(buf[5:], msg)
   155  		h.t.Write(s, nil, buf, &Options{})
   156  	}
   157  }
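
// The 5-byte header handled above is the gRPC length-prefixed message
// framing: one compression-flag byte followed by a big-endian uint32 payload
// length. A minimal sketch of an encoder for that framing, under the same
// assumptions (encodeLengthPrefixed is a hypothetical helper, not part of
// this package):
func encodeLengthPrefixed(msg []byte) []byte {
	buf := make([]byte, 5+len(msg))
	buf[0] = 0 // compression flag: 0 means the payload is uncompressed
	binary.BigEndian.PutUint32(buf[1:5], uint32(len(msg)))
	copy(buf[5:], msg)
	return buf
}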
   158  
   159  func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
   160  	conn, ok := s.st.(*http2Server)
   161  	if !ok {
   162  		t.Errorf("Failed to convert %v to *http2Server", s.st)
   163  		h.t.WriteStatus(s, status.New(codes.Internal, ""))
   164  		return
   165  	}
   166  	var sent int
   167  	p := make([]byte, http2MaxFrameLen)
   168  	for sent < int(initialWindowSize) {
   169  		n := int(initialWindowSize) - sent
   170  		// The last message may be smaller than http2MaxFrameLen
   171  		if n <= http2MaxFrameLen {
   172  			if s.Method() == "foo.Connection" {
   173  				// Violate connection level flow control window of client but do not
   174  				// violate any stream level windows.
   175  				p = make([]byte, n)
   176  			} else {
   177  				// Violate stream level flow control window of client.
   178  				p = make([]byte, n+1)
   179  			}
   180  		}
   181  		conn.controlBuf.put(&dataFrame{
   182  			streamID:    s.id,
   183  			h:           nil,
   184  			d:           p,
   185  			onEachWrite: func() {},
   186  		})
   187  		sent += len(p)
   188  	}
   189  }
   190  
   191  func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) {
   192  	// a raw newline is not accepted by the http2 framer, so it must be encoded.
   193  	h.t.WriteStatus(s, encodingTestStatus)
   194  }
   195  
   196  func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) {
   197  	headerFields := []hpack.HeaderField{}
   198  	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField})
   199  	h.t.controlBuf.put(&headerFrame{
   200  		streamID:  s.id,
   201  		hf:        headerFields,
   202  		endStream: false,
   203  	})
   204  }
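
// The header fields queued above are HPACK-encoded by the framer before they
// reach the wire. A minimal sketch of producing a raw header block with
// golang.org/x/net/http2/hpack (buildHeaderBlock is a hypothetical helper;
// the transport performs this encoding internally):
func buildHeaderBlock(fields []hpack.HeaderField) ([]byte, error) {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, f := range fields {
		// WriteField HPACK-encodes one name/value pair into buf.
		if err := enc.WriteField(f); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}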
   205  
   206  // handleStreamDelayRead delays reads so that the other side has to halt on
   207  // stream-level flow control.
   208  // This handler assumes dynamic flow control is turned off and that window
   209  // sizes are set to defaultWindowSize.
   210  func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
   211  	req := expectedRequest
   212  	resp := expectedResponse
   213  	if s.Method() == "foo.Large" {
   214  		req = expectedRequestLarge
   215  		resp = expectedResponseLarge
   216  	}
   217  	var (
   218  		mu    sync.Mutex
   219  		total int
   220  	)
   221  	s.wq.replenish = func(n int) {
   222  		mu.Lock()
   223  		total += n
   224  		mu.Unlock()
   225  		s.wq.realReplenish(n)
   226  	}
   227  	getTotal := func() int {
   228  		mu.Lock()
   229  		defer mu.Unlock()
   230  		return total
   231  	}
   232  	done := make(chan struct{})
   233  	defer close(done)
   234  	go func() {
   235  		for {
   236  			select {
   237  			// Prevent goroutine from leaking.
   238  			case <-done:
   239  				return
   240  			default:
   241  			}
   242  			if getTotal() == int(defaultWindowSize) {
   243  				// Signal the client to start reading and
   244  			// thereby send a window update.
   245  				close(h.notify)
   246  				return
   247  			}
   248  			runtime.Gosched()
   249  		}
   250  	}()
   251  	p := make([]byte, len(req))
   252  
   253  	// Let the other side run out of stream-level window before
   254  	// starting to read and thereby sending a window update.
   255  	timer := time.NewTimer(time.Second * 10)
   256  	select {
   257  	case <-h.getNotified:
   258  		timer.Stop()
   259  	case <-timer.C:
   260  		t.Errorf("Server timed-out.")
   261  		return
   262  	}
   263  	_, err := s.Read(p)
   264  	if err != nil {
   265  		t.Errorf("s.Read(_) = _, %v, want _, <nil>", err)
   266  		return
   267  	}
   268  
   269  	if !bytes.Equal(p, req) {
   270  		t.Errorf("handleStream got %v, want %v", p, req)
   271  		return
   272  	}
   273  	// This write will cause the server to run out of stream-level
   274  	// flow control, and the other side won't send a window update
   275  	// until that happens.
   276  	if err := h.t.Write(s, nil, resp, &Options{}); err != nil {
   277  		t.Errorf("server write got %v, want <nil>", err)
   278  		return
   279  	}
   280  	// Read one more time to ensure that everything remains fine and
   281  	// that the goroutine we launched earlier to signal the client
   282  	// to read gets enough time to run.
   283  	_, err = s.Read(p)
   284  	if err != nil {
   285  		t.Errorf("s.Read(_) = _, %v, want _, nil", err)
   286  		return
   287  	}
   288  	// send the trailer to end the stream.
   289  	if err := h.t.WriteStatus(s, status.New(codes.OK, "")); err != nil {
   290  		t.Errorf("server WriteStatus got %v, want <nil>", err)
   291  		return
   292  	}
   293  }
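
// Both this handler and TestLargeMessageWithDelayRead instrument wq.replenish
// to observe how many bytes the reader has credited back to the flow-control
// window. A generic sketch of that wrap-and-count pattern, assuming only that
// the original callback takes the number of replenished bytes (wrapReplenish
// is a hypothetical helper):
func wrapReplenish(orig func(int), mu *sync.Mutex, total *int) func(int) {
	return func(n int) {
		mu.Lock()
		*total += n // record bytes credited back to the window
		mu.Unlock()
		orig(n) // delegate to the real replenish
	}
}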
   294  
   295  // start starts the server. Other goroutines should block on s.ready for further operations.
   296  func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) {
   297  	// Create the listener.
   298  	var err error
   299  	if port == 0 {
   300  		s.lis, err = netpoll.CreateListener("tcp", "localhost:0")
   301  	} else {
   302  		s.lis, err = netpoll.CreateListener("tcp", "localhost:"+strconv.Itoa(port))
   303  	}
   304  	if err != nil {
   305  		s.startedErr <- fmt.Errorf("failed to create netpoll listener: %v", err)
   306  		return
   307  	}
   308  	_, p, err := net.SplitHostPort(s.lis.Addr().String())
   309  	if err != nil {
   310  		s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err)
   311  		return
   312  	}
   313  	s.port = p
   314  	s.conns = make(map[ServerTransport]bool)
   315  	s.startedErr <- nil
   316  
   317  	// onConnect: read data from the connection and run the stream-handling logic.
   318  	var onConnect netpoll.OnConnect = func(ctx context.Context, connection netpoll.Connection) context.Context {
   319  		transport, err := NewServerTransport(context.Background(), connection, serverConfig)
   320  		if err != nil {
   321  			panic(fmt.Sprintf("NewServerTransport err: %s", err.Error()))
   322  		}
   323  		s.mu.Lock()
   324  		if s.conns == nil {
   325  			s.mu.Unlock()
   326  			transport.Close()
   327  			return ctx
   328  		}
   329  		s.conns[transport] = true
   330  		h := &testStreamHandler{t: transport.(*http2Server)}
   331  		s.h = h
   332  		s.mu.Unlock()
   333  		switch ht {
   334  		case notifyCall:
   335  			go transport.HandleStreams(func(stream *Stream) {
   336  				s.mu.Lock()
   337  				h.handleStreamAndNotify(stream)
   338  				s.mu.Unlock()
   339  			}, func(ctx context.Context, _ string) context.Context {
   340  				return ctx
   341  			})
   342  		case suspended:
   343  			go transport.HandleStreams(func(*Stream) {}, // Do nothing to handle the stream.
   344  				func(ctx context.Context, method string) context.Context {
   345  					return ctx
   346  				})
   347  		case misbehaved:
   348  			go transport.HandleStreams(func(s *Stream) {
   349  				go h.handleStreamMisbehave(t, s)
   350  			}, func(ctx context.Context, method string) context.Context {
   351  				return ctx
   352  			})
   353  		case encodingRequiredStatus:
   354  			go transport.HandleStreams(func(s *Stream) {
   355  				go h.handleStreamEncodingRequiredStatus(t, s)
   356  			}, func(ctx context.Context, method string) context.Context {
   357  				return ctx
   358  			})
   359  		case invalidHeaderField:
   360  			go transport.HandleStreams(func(s *Stream) {
   361  				go h.handleStreamInvalidHeaderField(t, s)
   362  			}, func(ctx context.Context, method string) context.Context {
   363  				return ctx
   364  			})
   365  		case delayRead:
   366  			h.notify = make(chan struct{})
   367  			h.getNotified = make(chan struct{})
   368  			s.mu.Lock()
   369  			close(s.ready)
   370  			s.mu.Unlock()
   371  			go transport.HandleStreams(func(s *Stream) {
   372  				go h.handleStreamDelayRead(t, s)
   373  			}, func(ctx context.Context, method string) context.Context {
   374  				return ctx
   375  			})
   376  		case pingpong:
   377  			go transport.HandleStreams(func(s *Stream) {
   378  				go h.handleStreamPingPong(t, s)
   379  			}, func(ctx context.Context, method string) context.Context {
   380  				return ctx
   381  			})
   382  		default:
   383  			go transport.HandleStreams(func(s *Stream) {
   384  				go h.handleStream(t, s)
   385  			}, func(ctx context.Context, method string) context.Context {
   386  				return ctx
   387  			})
   388  		}
   389  		return ctx
   390  	}
   391  
   392  	// options: custom configuration for initializing the EventLoop.
   393  	opts := []netpoll.Option{
   394  		netpoll.WithIdleTimeout(10 * time.Minute),
   395  		netpoll.WithOnConnect(onConnect),
   396  	}
   397  
   398  	// Create the EventLoop.
   399  	s.eventLoop, err = netpoll.NewEventLoop(nil, opts...)
   400  	if err != nil {
   401  		panic("create netpoll event-loop fail")
   402  	}
   403  
   404  	// Run the server.
   405  	err = s.eventLoop.Serve(s.lis)
   406  	if err != nil {
   407  		t.Logf("netpoll server Serve failed, err=%v", err)
   408  	}
   409  }
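
// The lifecycle above follows netpoll's usual pattern: create a listener,
// build an event loop with an OnConnect callback, then block in Serve until
// the loop is shut down. A condensed sketch of just that skeleton, under the
// same assumptions as start (runLoopbackEventLoop is a hypothetical helper):
func runLoopbackEventLoop(onConnect netpoll.OnConnect) error {
	lis, err := netpoll.CreateListener("tcp", "localhost:0")
	if err != nil {
		return err
	}
	loop, err := netpoll.NewEventLoop(nil, netpoll.WithOnConnect(onConnect))
	if err != nil {
		return err
	}
	go loop.Serve(lis) // Serve blocks, so run it on its own goroutine.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return loop.Shutdown(ctx) // Shutdown drains connections before returning.
}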
   410  
   411  func (s *server) wait(t *testing.T, timeout time.Duration) {
   412  	select {
   413  	case err := <-s.startedErr:
   414  		if err != nil {
   415  			t.Fatal(err)
   416  		}
   417  	case <-time.After(timeout):
   418  		t.Fatalf("Timed out after %v waiting for server to be ready", timeout)
   419  	}
   420  }
   421  
   422  func (s *server) stop() {
   423  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   424  	defer cancel()
   425  	if err := s.eventLoop.Shutdown(ctx); err != nil {
   426  		fmt.Printf("netpoll server exit failed, err=%v", err)
   427  	}
   428  	s.lis.Close()
   429  	s.mu.Lock()
   430  	for c := range s.conns {
   431  		c.Close()
   432  	}
   433  	s.conns = nil
   434  	s.mu.Unlock()
   435  }
   436  
   437  func (s *server) addr() string {
   438  	if s.lis == nil {
   439  		return ""
   440  	}
   441  	return s.lis.Addr().String()
   442  }
   443  
   444  func setUpServerOnly(t *testing.T, port int, serverConfig *ServerConfig, ht hType) *server {
   445  	server := &server{startedErr: make(chan error, 1), ready: make(chan struct{})}
   446  	go server.start(t, port, serverConfig, ht)
   447  	server.wait(t, time.Second)
   448  	return server
   449  }
   450  
   451  func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, *http2Client) {
   452  	return setUpWithOptions(t, port, &ServerConfig{MaxStreams: maxStreams}, ht, ConnectOptions{})
   453  }
   454  
   455  func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client) {
   456  	server := setUpServerOnly(t, port, serverConfig, ht)
   457  	conn, err := netpoll.NewDialer().DialTimeout("tcp", "localhost:"+server.port, time.Second)
   458  	if err != nil {
   459  		t.Fatalf("failed to dial connection: %v", err)
   460  	}
   461  	ct, connErr := NewClientTransport(context.Background(), conn.(netpoll.Connection), copts, "", func(GoAwayReason) {}, func() {})
   462  	if connErr != nil {
   463  		t.Fatalf("failed to create transport: %v", connErr)
   464  	}
   465  	return server, ct.(*http2Client)
   466  }
   467  
   468  func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.Conn) *http2Client {
   469  	lis, err := net.Listen("tcp", "localhost:0")
   470  	if err != nil {
   471  		fmt.Printf("Failed to listen: %v", err)
   472  		return nil
   473  	}
   474  	go func() {
   475  		exitCh := make(chan struct{}, 1)
   476  		// Launch a non-responsive server.
   477  		eventLoop, err := netpoll.NewEventLoop(func(ctx context.Context, connection netpoll.Connection) error {
   478  			defer lis.Close()
   479  			connCh <- connection.(net.Conn)
   480  			exitCh <- struct{}{}
   481  			return nil
   482  		})
   483  		if err != nil {
   484  			fmt.Printf("Create netpoll event-loop failed")
   485  		}
   486  
   487  		go func() {
   488  			err = eventLoop.Serve(lis)
   489  			if err != nil {
   490  				fmt.Printf("netpoll server exit failed, err=%v", err)
   491  			}
   492  		}()
   493  
   494  		select {
   495  		case <-exitCh:
   496  			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   497  			defer cancel()
   498  			if err := eventLoop.Shutdown(ctx); err != nil {
   499  				fmt.Printf("netpoll server exit failed, err=%v", err)
   500  			}
   501  		default:
   502  		}
   503  	}()
   504  
   505  	conn, err := netpoll.NewDialer().DialTimeout("tcp", lis.Addr().String(), time.Second)
   506  	if err != nil {
   507  		fmt.Printf("Failed to dial: %v", err)
   508  	}
   509  	tr, err := NewClientTransport(context.Background(), conn.(netpoll.Connection), copts, "mockDestService", func(GoAwayReason) {}, func() {})
   510  	if err != nil {
   511  		// Server clean-up.
   512  		lis.Close()
   513  		if conn, ok := <-connCh; ok {
   514  			conn.Close()
   515  		}
   516  		fmt.Printf("Failed to dial: %v", err)
   517  	}
   518  	return tr.(*http2Client)
   519  }
   520  
   521  // TestInflightStreamClosing ensures that closing an in-flight stream
   522  // sends a status error to a concurrent stream reader.
   523  func TestInflightStreamClosing(t *testing.T) {
   524  	serverConfig := &ServerConfig{}
   525  	server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
   526  	defer server.stop()
   527  	defer client.Close()
   528  
   529  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   530  	defer cancel()
   531  	stream, err := client.NewStream(ctx, &CallHdr{})
   532  	if err != nil {
   533  		t.Fatalf("Client failed to create RPC request: %v", err)
   534  	}
   535  
   536  	donec := make(chan struct{})
   537  	// serr := &status.Error{e: s.Proto()}
   538  	serr := status.Err(codes.Internal, "client connection is closing")
   539  	go func() {
   540  		defer close(donec)
   541  		if _, err := stream.Read(make([]byte, defaultWindowSize)); err != serr {
   542  			t.Errorf("unexpected Stream error %v, expected %v", err, serr)
   543  		}
   544  	}()
   545  
   546  	// should unblock concurrent stream.Read
   547  	client.CloseStream(stream, serr)
   548  
   549  	// wait for stream.Read error
   550  	timeout := time.NewTimer(3 * time.Second)
   551  	select {
   552  	case <-donec:
   553  		if !timeout.Stop() {
   554  			<-timeout.C
   555  		}
   556  	case <-timeout.C:
   557  		t.Fatalf("Test timed out, expected a status error.")
   558  	}
   559  }
   560  
   561  func TestClientSendAndReceive(t *testing.T) {
   562  	server, ct := setUp(t, 0, math.MaxUint32, normal)
   563  	callHdr := &CallHdr{
   564  		Host:   "localhost",
   565  		Method: "foo.Small",
   566  	}
   567  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   568  	defer ctxCancel()
   569  	s1, err1 := ct.NewStream(ctx, callHdr)
   570  	if err1 != nil {
   571  		t.Fatalf("failed to open stream: %v", err1)
   572  	}
   573  	if s1.id != 1 {
   574  		t.Fatalf("wrong stream id: %d", s1.id)
   575  	}
   576  	s2, err2 := ct.NewStream(ctx, callHdr)
   577  	if err2 != nil {
   578  		t.Fatalf("failed to open stream: %v", err2)
   579  	}
   580  	if s2.id != 3 {
   581  		t.Fatalf("wrong stream id: %d", s2.id)
   582  	}
   583  	opts := Options{Last: true}
   584  	if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF {
   585  		t.Fatalf("failed to send data: %v", err)
   586  	}
   587  	p := make([]byte, len(expectedResponse))
   588  	_, recvErr := s1.Read(p)
   589  	if recvErr != nil || !bytes.Equal(p, expectedResponse) {
   590  		t.Fatalf("Error: %v, want <nil>; Result: %v, want %v", recvErr, p, expectedResponse)
   591  	}
   592  	_, recvErr = s1.Read(p)
   593  	if recvErr != io.EOF {
   594  		t.Fatalf("Error: %v; want <EOF>", recvErr)
   595  	}
   596  	ct.Close()
   597  	server.stop()
   598  }
   599  
   600  func TestClientErrorNotify(t *testing.T) {
   601  	server, ct := setUp(t, 0, math.MaxUint32, normal)
   602  	go func() {
   603  		if server != nil {
   604  			server.stop()
   605  		}
   606  	}()
   607  	// ct.reader should detect the error and activate ct.Error().
   608  	<-ct.Error()
   609  	ct.Close()
   610  }
   611  
   612  func performOneRPC(ct ClientTransport) {
   613  	callHdr := &CallHdr{
   614  		Host:   "localhost",
   615  		Method: "foo.Small",
   616  	}
   617  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   618  	defer cancel()
   619  	s, err := ct.NewStream(ctx, callHdr)
   620  	if err != nil {
   621  		return
   622  	}
   623  	opts := Options{Last: true}
   624  	if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF {
   625  		time.Sleep(2 * time.Millisecond)
   626  		// The following s.Read() calls could error out because the
   627  		// underlying transport is gone.
   628  		//
   629  		// Read response
   630  		p := make([]byte, len(expectedResponse))
   631  		s.Read(p)
   632  		// Read io.EOF
   633  		s.Read(p)
   634  	}
   635  }
   636  
   637  func TestClientMix(t *testing.T) {
   638  	s, ct := setUp(t, 0, math.MaxUint32, normal)
   639  	go func(s *server) {
   640  		time.Sleep(300 * time.Millisecond)
   641  		s.stop()
   642  	}(s)
   643  	go func(ct ClientTransport) {
   644  		<-ct.Error()
   645  		ct.Close()
   646  	}(ct)
   647  	for i := 0; i < 1000; i++ {
   648  		time.Sleep(1 * time.Millisecond)
   649  		go performOneRPC(ct)
   650  	}
   651  }
   652  
   653  func TestLargeMessage(t *testing.T) {
   654  	server, ct := setUp(t, 0, math.MaxUint32, normal)
   655  	callHdr := &CallHdr{
   656  		Host:   "localhost",
   657  		Method: "foo.Large",
   658  	}
   659  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   660  	defer ctxCancel()
   661  	var wg sync.WaitGroup
   662  	for i := 0; i < 2; i++ {
   663  		wg.Add(1)
   664  		go func() {
   665  			defer wg.Done()
   666  			s, err := ct.NewStream(ctx, callHdr)
   667  			if err != nil {
   668  				t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
   669  			}
   670  			if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil && err != io.EOF {
   671  				t.Errorf("%v.write(_, _, _) = %v, want  <nil>", ct, err)
   672  			}
   673  			p := make([]byte, len(expectedResponseLarge))
   674  			if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
   675  				t.Errorf("s.Read(%v) = _, %v, want %v, <nil>", err, p, expectedResponse)
   676  			}
   677  			if _, err = s.Read(p); err != io.EOF {
   678  				t.Errorf("Failed to complete the stream %v; want <EOF>", err)
   679  			}
   680  		}()
   681  	}
   682  	wg.Wait()
   683  	ct.Close()
   684  	server.stop()
   685  }
   686  
   687  func TestLargeMessageWithDelayRead(t *testing.T) {
   688  	// Disable dynamic flow control.
   689  	sc := &ServerConfig{
   690  		InitialWindowSize:     defaultWindowSize,
   691  		InitialConnWindowSize: defaultWindowSize,
   692  	}
   693  	co := ConnectOptions{
   694  		InitialWindowSize:     defaultWindowSize,
   695  		InitialConnWindowSize: defaultWindowSize,
   696  	}
   697  	server, ct := setUpWithOptions(t, 0, sc, delayRead, co)
   698  	defer server.stop()
   699  	defer ct.Close()
   700  	server.mu.Lock()
   701  	ready := server.ready
   702  	server.mu.Unlock()
   703  	callHdr := &CallHdr{
   704  		Host:   "localhost",
   705  		Method: "foo.Large",
   706  	}
   707  	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
   708  	defer cancel()
   709  	s, err := ct.NewStream(ctx, callHdr)
   710  	if err != nil {
   711  		t.Fatalf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
   712  		return
   713  	}
   714  	// Wait for the server's handler to be initialized.
   715  	select {
   716  	case <-ready:
   717  	case <-ctx.Done():
   718  		t.Fatalf("Client timed out waiting for server handler to be initialized.")
   719  	}
   720  	server.mu.Lock()
   721  	serviceHandler := server.h
   722  	server.mu.Unlock()
   723  	var (
   724  		mu    sync.Mutex
   725  		total int
   726  	)
   727  	s.wq.replenish = func(n int) {
   728  		mu.Lock()
   729  		total += n
   730  		mu.Unlock()
   731  		s.wq.realReplenish(n)
   732  	}
   733  	getTotal := func() int {
   734  		mu.Lock()
   735  		defer mu.Unlock()
   736  		return total
   737  	}
   738  	done := make(chan struct{})
   739  	defer close(done)
   740  	go func() {
   741  		for {
   742  			select {
   743  			// Prevent goroutine from leaking in case of error.
   744  			case <-done:
   745  				return
   746  			default:
   747  			}
   748  			if getTotal() == int(defaultWindowSize) {
   749  			// Unblock the server so it can read and
   750  			// thereby send a stream-level window update.
   751  				close(serviceHandler.getNotified)
   752  				return
   753  			}
   754  			runtime.Gosched()
   755  		}
   756  	}()
   757  	// This write will cause the client to run out of stream-level
   758  	// flow control, and the other side won't send a window update
   759  	// until that happens.
   760  	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{}); err != nil {
   761  		t.Fatalf("write(_, _, _) = %v, want  <nil>", err)
   762  	}
   763  	p := make([]byte, len(expectedResponseLarge))
   764  
   765  	// Wait for the other side to run out of stream level flow control before
   766  	// reading and thereby sending a window update.
   767  	select {
   768  	case <-serviceHandler.notify:
   769  	case <-ctx.Done():
   770  		t.Fatalf("Client timed out")
   771  	}
   772  	if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
   773  		t.Fatalf("s.Read(_) = _, %v, want _, <nil>", err)
   774  	}
   775  	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil {
   776  		t.Fatalf("write(_, _, _) = %v, want <nil>", err)
   777  	}
   778  	if _, err = s.Read(p); err != io.EOF {
   779  		t.Fatalf("Failed to complete the stream %v; want <EOF>", err)
   780  	}
   781  }
   782  
   783  // FIXME Test failed because of a goroutine leak.
   784  //func TestGracefulClose(t *testing.T) {
   785  //	server, ct := setUp(t, 0, math.MaxUint32, pingpong)
   786  //	defer func() {
   787  //		// Stop the server's listener to make the server's goroutines terminate
   788  //		// (after the last active stream is done).
   789  //		server.lis.Close()
   790  //		// Check for goroutine leaks (i.e. GracefulClose with an active stream
   791  //		// doesn't eventually close the connection when that stream completes).
   792  //		leakcheck.Check(t)
   793  //		// Correctly clean up the server
   794  //		server.stop()
   795  //	}()
   796  //	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
   797  //	defer cancel()
   798  //	s, err := ct.NewStream(ctx, &CallHdr{})
   799  //	if err != nil {
   800  //		t.Fatalf("NewStream(_, _) = _, %v, want _, <nil>", err)
   801  //	}
   802  //	msg := make([]byte, 1024)
   803  //	outgoingHeader := make([]byte, 5)
   804  //	outgoingHeader[0] = byte(0)
   805  //	binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(len(msg)))
   806  //	incomingHeader := make([]byte, 5)
   807  //	if err := ct.write(s, outgoingHeader, msg, &Options{}); err != nil {
   808  //		t.Fatalf("Error while writing: %v", err)
   809  //	}
   810  //	if _, err := s.Read(incomingHeader); err != nil {
   811  //		t.Fatalf("Error while reading: %v", err)
   812  //	}
   813  //	sz := binary.BigEndian.Uint32(incomingHeader[1:])
   814  //	recvMsg := make([]byte, int(sz))
   815  //	if _, err := s.Read(recvMsg); err != nil {
   816  //		t.Fatalf("Error while reading: %v", err)
   817  //	}
   818  //	ct.GracefulClose()
   819  //	var wg sync.WaitGroup
   820  //	// Expect the failure for all the follow-up streams because ct has been closed gracefully.
   821  //	for i := 0; i < 200; i++ {
   822  //		wg.Add(1)
   823  //		go func() {
   824  //			defer wg.Done()
   825  //			str, err := ct.NewStream(ctx, &CallHdr{})
   826  //			if err == ErrConnClosing {
   827  //				return
   828  //			} else if err != nil {
   829  //				t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing)
   830  //				return
   831  //			}
   832  //			ct.write(str, nil, nil, &Options{Last: true})
   833  //			if _, err := str.Read(make([]byte, 8)); err != errStreamDrain && err != ErrConnClosing {
   834  //				t.Errorf("_.Read(_) = _, %v, want _, %v or %v", err, errStreamDrain, ErrConnClosing)
   835  //			}
   836  //		}()
   837  //	}
   838  //	ct.write(s, nil, nil, &Options{Last: true})
   839  //	if _, err := s.Read(incomingHeader); err != io.EOF {
   840  //		t.Fatalf("Client expected EOF from the server. Got: %v", err)
   841  //	}
   842  //	// The stream which was created before graceful close can still proceed.
   843  //	wg.Wait()
   844  //}
   845  
   846  func TestLargeMessageSuspension(t *testing.T) {
   847  	server, ct := setUp(t, 0, math.MaxUint32, suspended)
   848  	callHdr := &CallHdr{
   849  		Host:   "localhost",
   850  		Method: "foo.Large",
   851  	}
   852  	// Set a long enough timeout for writing a large message out.
   853  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   854  	defer cancel()
   855  	s, err := ct.NewStream(ctx, callHdr)
   856  	if err != nil {
   857  		t.Fatalf("failed to open stream: %v", err)
   858  	}
   859  	// Launch a goroutine similar to the stream monitoring goroutine in
   860  	// stream.go to keep track of context timeout and call CloseStream.
   861  	go func() {
   862  		<-ctx.Done()
   863  		ct.CloseStream(s, ContextErr(ctx.Err()))
   864  	}()
   865  	// The write should not complete successfully due to flow control.
   866  	msg := make([]byte, initialWindowSize*8)
   867  	ct.Write(s, nil, msg, &Options{})
   868  	err = ct.Write(s, nil, msg, &Options{Last: true})
   869  	if err != errStreamDone {
   870  		t.Fatalf("write got %v, want io.EOF", err)
   871  	}
   872  	expectedErr := status.Err(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
   873  	if _, err := s.Read(make([]byte, 8)); err.Error() != expectedErr.Error() {
   874  		t.Fatalf("Read got %v of type %T, want %v", err, err, expectedErr)
   875  	}
   876  	ct.Close()
   877  	server.stop()
   878  }
   879  
   880  func TestMaxStreams(t *testing.T) {
   881  	serverConfig := &ServerConfig{
   882  		MaxStreams: 1,
   883  	}
   884  	server, ct := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
   885  	defer ct.Close()
   886  	defer server.stop()
   887  	callHdr := &CallHdr{
   888  		Host:   "localhost",
   889  		Method: "foo.Large",
   890  	}
   891  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   892  	defer cancel()
   893  	s, err := ct.NewStream(ctx, callHdr)
   894  	if err != nil {
   895  		t.Fatalf("Failed to open stream: %v", err)
   896  	}
   897  	// Keep creating streams until one fails with deadline exceeded, which indicates
   898  	// that the server's settings have been applied on the client.
   899  	slist := []*Stream{}
   900  	pctx, cancel := context.WithCancel(context.Background())
   901  	defer cancel()
   902  	timer := time.NewTimer(time.Second * 10)
   903  	expectedErr := status.Err(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
   904  	for {
   905  		select {
   906  		case <-timer.C:
   907  			t.Fatalf("Test timeout: client didn't receive server settings.")
   908  		default:
   909  		}
   910  		ctx, cancel := context.WithDeadline(pctx, time.Now().Add(time.Second))
   911  		// This is only to get rid of govet. All these contexts are based on a base
   912  		// context which is canceled at the end of the test.
   913  		defer cancel()
   914  		if str, err := ct.NewStream(ctx, callHdr); err == nil {
   915  			slist = append(slist, str)
   916  			continue
   917  		} else if err.Error() != expectedErr.Error() {
   918  			t.Fatalf("ct.NewStream(_,_) = _, %v, want _, %v", err, expectedErr)
   919  		}
   920  		timer.Stop()
   921  		break
   922  	}
   923  	done := make(chan struct{})
   924  	// Try and create a new stream.
   925  	go func() {
   926  		defer close(done)
   927  		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
   928  		defer cancel()
   929  		if _, err := ct.NewStream(ctx, callHdr); err != nil {
   930  			t.Errorf("Failed to open stream: %v", err)
   931  		}
   932  	}()
   933  	// Close all the extra streams created and make sure the new stream is not created.
   934  	for _, str := range slist {
   935  		ct.CloseStream(str, nil)
   936  	}
   937  	select {
   938  	case <-done:
   939  		t.Fatalf("Test failed: didn't expect new stream to be created just yet.")
   940  	default:
   941  	}
   942  	// Close the first stream created so that the new stream can finally be created.
   943  	ct.CloseStream(s, nil)
   944  	<-done
   945  	ct.Close()
   946  	<-ct.writerDone
   947  	if ct.maxConcurrentStreams != 1 {
   948  		t.Fatalf("ct.maxConcurrentStreams: %d, want 1", ct.maxConcurrentStreams)
   949  	}
   950  }
   951  
   952  func TestServerContextCanceledOnClosedConnection(t *testing.T) {
   953  	server, ct := setUp(t, 0, math.MaxUint32, suspended)
   954  	callHdr := &CallHdr{
   955  		Host:   "localhost",
   956  		Method: "foo",
   957  	}
   958  	var sc *http2Server
   959  	// Wait until the server transport is setup.
   960  	for {
   961  		server.mu.Lock()
   962  		if len(server.conns) == 0 {
   963  			server.mu.Unlock()
   964  			time.Sleep(time.Millisecond)
   965  			continue
   966  		}
   967  		for k := range server.conns {
   968  			var ok bool
   969  			sc, ok = k.(*http2Server)
   970  			if !ok {
   971  				t.Fatalf("Failed to convert %v to *http2Server", k)
   972  			}
   973  		}
   974  		server.mu.Unlock()
   975  		break
   976  	}
   977  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   978  	defer cancel()
   979  	s, err := ct.NewStream(ctx, callHdr)
   980  	if err != nil {
   981  		t.Fatalf("Failed to open stream: %v", err)
   982  	}
   983  	ct.controlBuf.put(&dataFrame{
   984  		streamID:    s.id,
   985  		endStream:   false,
   986  		h:           nil,
   987  		d:           make([]byte, http2MaxFrameLen),
   988  		onEachWrite: func() {},
   989  	})
   990  	// Loop until the server side stream is created.
   991  	var ss *Stream
   992  	for {
   993  		time.Sleep(time.Second)
   994  		sc.mu.Lock()
   995  		if len(sc.activeStreams) == 0 {
   996  			sc.mu.Unlock()
   997  			continue
   998  		}
   999  		ss = sc.activeStreams[s.id]
  1000  		sc.mu.Unlock()
  1001  		break
  1002  	}
  1003  	ct.Close()
  1004  	select {
  1005  	case <-ss.Context().Done():
  1006  		if ss.Context().Err() != context.Canceled {
  1007  			t.Fatalf("ss.Context().Err() got %v, want %v", ss.Context().Err(), context.Canceled)
  1008  		}
  1009  	case <-time.After(3 * time.Second):
  1010  		t.Fatalf("Failed to cancel the context of the sever side stream.")
  1011  	}
  1012  	server.stop()
  1013  }
  1014  
  1015  // FIXME delete the comments
  1016  func TestClientConnDecoupledFromApplicationRead(t *testing.T) {
  1017  	connectOptions := ConnectOptions{
  1018  		InitialWindowSize:     defaultWindowSize,
  1019  		InitialConnWindowSize: defaultWindowSize,
  1020  	}
  1021  	server, client := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions)
  1022  	defer server.stop()
  1023  	defer client.Close()
  1024  
  1025  	waitWhileTrue(t, func() (bool, error) {
  1026  		server.mu.Lock()
  1027  		defer server.mu.Unlock()
  1028  
  1029  		if len(server.conns) == 0 {
  1030  			return true, fmt.Errorf("timed-out while waiting for connection to be created on the server")
  1031  		}
  1032  		return false, nil
  1033  	})
  1034  
  1035  	var st *http2Server
  1036  	server.mu.Lock()
  1037  	for k := range server.conns {
  1038  		st = k.(*http2Server)
  1039  	}
  1040  	notifyChan := make(chan struct{})
  1041  	server.h.notify = notifyChan
  1042  	server.mu.Unlock()
  1043  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1044  	defer cancel()
  1045  	cstream1, err := client.NewStream(ctx, &CallHdr{})
  1046  	if err != nil {
  1047  		t.Fatalf("Client failed to create first stream. Err: %v", err)
  1048  	}
  1049  
  1050  	<-notifyChan
  1051  	var sstream1 *Stream
  1052  	// Access stream on the server.
  1053  	st.mu.Lock()
  1054  	for _, v := range st.activeStreams {
  1055  		if v.id == cstream1.id {
  1056  			sstream1 = v
  1057  		}
  1058  	}
  1059  	st.mu.Unlock()
  1060  	if sstream1 == nil {
  1061  		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id)
  1062  	}
  1063  	// Exhaust client's connection window.
  1064  	if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
  1065  		t.Fatalf("Server failed to write data. Err: %v", err)
  1066  	}
  1067  	notifyChan = make(chan struct{})
  1068  	server.mu.Lock()
  1069  	server.h.notify = notifyChan
  1070  	server.mu.Unlock()
  1071  	// Create another stream on client.
  1072  	cstream2, err := client.NewStream(ctx, &CallHdr{})
  1073  	if err != nil {
  1074  		t.Fatalf("Client failed to create second stream. Err: %v", err)
  1075  	}
  1076  	<-notifyChan
  1077  	var sstream2 *Stream
  1078  	st.mu.Lock()
  1079  	for _, v := range st.activeStreams {
  1080  		if v.id == cstream2.id {
  1081  			sstream2 = v
  1082  		}
  1083  	}
  1084  	st.mu.Unlock()
  1085  	if sstream2 == nil {
  1086  		t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id)
  1087  	}
  1088  	// Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream.
  1089  	if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
  1090  		t.Fatalf("Server failed to write data. Err: %v", err)
  1091  	}
  1092  
  1093  	// Client should be able to read data on second stream.
  1094  	if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil {
  1095  		t.Fatalf("_.Read(_) = _, %v, want _, <nil>", err)
  1096  	}
  1097  
  1098  	// Client should be able to read data on first stream.
  1099  	if _, err := cstream1.Read(make([]byte, defaultWindowSize)); err != nil {
  1100  		t.Fatalf("_.Read(_) = _, %v, want _, <nil>", err)
  1101  	}
  1102  }
  1103  
  1104  func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
  1105  	serverConfig := &ServerConfig{
  1106  		InitialWindowSize:     defaultWindowSize,
  1107  		InitialConnWindowSize: defaultWindowSize,
  1108  	}
  1109  	server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
  1110  	defer server.stop()
  1111  	defer client.Close()
  1112  	waitWhileTrue(t, func() (bool, error) {
  1113  		server.mu.Lock()
  1114  		defer server.mu.Unlock()
  1115  
  1116  		if len(server.conns) == 0 {
  1117  			return true, fmt.Errorf("timed-out while waiting for connection to be created on the server")
  1118  		}
  1119  		return false, nil
  1120  	})
  1121  	var st *http2Server
  1122  	server.mu.Lock()
  1123  	for k := range server.conns {
  1124  		st = k.(*http2Server)
  1125  	}
  1126  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1127  	defer cancel()
  1128  	server.mu.Unlock()
  1129  	cstream1, err := client.NewStream(ctx, &CallHdr{})
  1130  	if err != nil {
  1131  		t.Fatalf("Failed to create 1st stream. Err: %v", err)
  1132  	}
  1133  	// Exhaust server's connection window.
  1134  	if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil {
  1135  		t.Fatalf("Client failed to write data. Err: %v", err)
  1136  	}
  1137  	// Client should be able to create another stream and send data on it.
  1138  	cstream2, err := client.NewStream(ctx, &CallHdr{})
  1139  	if err != nil {
  1140  		t.Fatalf("Failed to create 2nd stream. Err: %v", err)
  1141  	}
  1142  	if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil {
  1143  		t.Fatalf("Client failed to write data. Err: %v", err)
  1144  	}
  1145  	// Get the streams on server.
  1146  	waitWhileTrue(t, func() (bool, error) {
  1147  		st.mu.Lock()
  1148  		defer st.mu.Unlock()
  1149  
  1150  		if len(st.activeStreams) != 2 {
  1151  			return true, fmt.Errorf("timed-out while waiting for server to have created the streams")
  1152  		}
  1153  		return false, nil
  1154  	})
  1155  	var sstream1 *Stream
  1156  	st.mu.Lock()
  1157  	for _, v := range st.activeStreams {
  1158  		if v.id == 1 {
  1159  			sstream1 = v
  1160  		}
  1161  	}
  1162  	st.mu.Unlock()
  1163  	// Reading from the stream on server should succeed.
  1164  	if _, err := sstream1.Read(make([]byte, defaultWindowSize)); err != nil {
  1165  		t.Fatalf("_.Read(_) = %v, want <nil>", err)
  1166  	}
  1167  
  1168  	if _, err := sstream1.Read(make([]byte, 1)); err != io.EOF {
  1169  		t.Fatalf("_.Read(_) = %v, want io.EOF", err)
  1170  	}
  1171  }
  1172  
  1173  func TestServerWithMisbehavedClient(t *testing.T) {
  1174  	var wg sync.WaitGroup
  1175  	server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
  1176  	defer func() {
  1177  		wg.Wait()
  1178  		server.stop()
  1179  	}()
  1181  	// Create a client that can override server stream quota.
  1182  	mconn, err := netpoll.NewDialer().DialTimeout("tcp", server.lis.Addr().String(), time.Second)
  1183  	if err != nil {
  1184  		t.Fatalf("Clent failed to dial:%v", err)
  1185  	}
  1186  	defer mconn.Close()
  1187  	if err := mconn.(netpoll.Connection).SetIdleTimeout(10 * time.Second); err != nil {
  1188  		t.Fatalf("Failed to set write deadline: %v", err)
  1189  	}
  1190  	if n, err := mconn.Write(ClientPreface); err != nil || n != len(ClientPreface) {
  1191  		t.Fatalf("mconn.write(ClientPreface ) = %d, %v, want %d, <nil>", n, err, len(ClientPreface))
  1192  	}
  1193  	// The success chan indicates that the reader received an RSTStream from the server.
  1194  	success := make(chan struct{})
  1195  	var mu sync.Mutex
  1196  	framer := grpcframe.NewFramer(mconn, mconn.(netpoll.Connection).Reader())
  1197  	if err := framer.WriteSettings(); err != nil {
  1198  		t.Fatalf("Error while writing settings: %v", err)
  1199  	}
  1200  	wg.Add(1)
  1201  	go func() { // Launch a reader for this misbehaving client.
  1202  		defer wg.Done()
  1203  		for {
  1204  			frame, err := framer.ReadFrame()
  1205  			if err != nil {
  1206  				return
  1207  			}
  1208  			switch frame := frame.(type) {
  1209  			case *http2.PingFrame:
  1210  				// Write a ping ack back so that the server's BDP estimation works right.
  1211  				mu.Lock()
  1212  				framer.WritePing(true, frame.Data)
  1213  				mu.Unlock()
  1214  			case *http2.RSTStreamFrame:
  1215  				if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1216  					t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1217  				}
  1218  				close(success)
  1219  				return
  1220  			default:
  1221  				// Do nothing.
  1222  			}
  1223  
  1224  		}
  1225  	}()
  1226  	// Create a stream.
  1227  	var buf bytes.Buffer
  1228  	henc := hpack.NewEncoder(&buf)
  1229  	// TODO(mmukhi): Remove unnecessary fields.
  1230  	if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}); err != nil {
  1231  		t.Fatalf("Error while encoding header: %v", err)
  1232  	}
  1233  	if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil {
  1234  		t.Fatalf("Error while encoding header: %v", err)
  1235  	}
  1236  	if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil {
  1237  		t.Fatalf("Error while encoding header: %v", err)
  1238  	}
  1239  	if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil {
  1240  		t.Fatalf("Error while encoding header: %v", err)
  1241  	}
  1242  	mu.Lock()
  1243  	if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil {
  1244  		mu.Unlock()
  1245  		t.Fatalf("Error while writing headers: %v", err)
  1246  	}
  1247  	mu.Unlock()
  1248  
  1249  	// Test server behavior for violation of stream flow control window size restriction.
  1250  	timer := time.NewTimer(time.Second * 5)
  1251  	dbuf := make([]byte, http2MaxFrameLen)
  1252  	for {
  1253  		select {
  1254  		case <-timer.C:
  1255  			t.Fatalf("Test timed out.")
  1256  		case <-success:
  1257  			return
  1258  		default:
  1259  		}
  1260  		mu.Lock()
  1261  		if err := framer.WriteData(1, false, dbuf); err != nil {
  1262  			mu.Unlock()
  1263  			// Error here means the server could have closed the connection due to flow control
  1264  			// violation. Make sure that is the case by waiting for success chan to be closed.
  1265  			select {
  1266  			case <-timer.C:
  1267  				t.Fatalf("Error while writing data: %v", err)
  1268  			case <-success:
  1269  				return
  1270  			}
  1271  		}
  1272  		mu.Unlock()
  1273  		// This for loop is capable of hogging the CPU and causing starvation
  1274  		// in Go versions prior to 1.9,
  1275  		// in a single-CPU environment. Explicitly relinquish the processor.
  1276  		runtime.Gosched()
  1277  	}
  1278  }
  1279  
  1280  // FIXME Test failed, hang up.
  1281  //func TestClientWithMisbehavedServer(t *testing.T) {
  1282  //	// Create a misbehaving server.
  1283  //	lis, err := netpoll.CreateListener("tcp", "localhost:0")
  1284  //	if err != nil {
  1285  //		t.Fatalf("Error while listening: %v", err)
  1286  //	}
  1287  //	defer lis.Close()
  1288  //	// success chan indicates that the server received
  1289  //	// RSTStream from the client.
  1290  //	success := make(chan struct{})
  1291  //
  1292  //	exitCh := make(chan struct{}, 1)
  1293  //	eventLoop, _ := netpoll.NewEventLoop(func(ctx context.Context, connection netpoll.Connection) error {
  1294  //		defer func() {
  1295  //			exitCh <- struct{}{}
  1296  //		}()
  1297  //		defer connection.Close()
  1298  //		if _, err := io.ReadFull(connection, make([]byte, len(ClientPreface))); err != nil {
  1299  //			t.Errorf("Error while reading client preface: %v", err)
  1300  //			return err
  1301  //		}
  1302  //		sfr := http2.NewFramer(connection, connection)
  1303  //		if err := sfr.WriteSettingsAck(); err != nil {
  1304  //			t.Errorf("Error while writing settings: %v", err)
  1305  //			return err
  1306  //		}
  1307  //		var mu sync.Mutex
  1308  //		for {
  1309  //			frame, err := sfr.ReadFrame()
  1310  //			if err != nil {
  1311  //				return err
  1312  //			}
  1313  //			switch frame := frame.(type) {
  1314  //			case *http2.HeadersFrame:
  1315  //				// When the client creates a stream, violate the stream flow control.
  1316  //				go func() {
  1317  //					buf := make([]byte, http2MaxFrameLen)
  1318  //					for {
  1319  //						mu.Lock()
  1320  //						if err := sfr.WriteData(1, false, buf); err != nil {
  1321  //							mu.Unlock()
  1322  //							return
  1323  //						}
  1324  //						mu.Unlock()
  1325  //						// This for loop is capable of hogging the CPU and cause starvation
  1326  //						// in Go versions prior to 1.9,
  1327  //						// in single CPU environment. Explicitly relinquish processor.
  1328  //						runtime.Gosched()
  1329  //					}
  1330  //				}()
  1331  //			case *http2.RSTStreamFrame:
  1332  //				if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1333  //					t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1334  //				}
  1335  //				close(success)
  1336  //				return err
  1337  //			case *http2.PingFrame:
  1338  //				mu.Lock()
  1339  //				sfr.WritePing(true, frame.Data)
  1340  //				mu.Unlock()
  1341  //			default:
  1342  //			}
  1343  //		}
  1344  //	})
  1345  //	go func() {
  1346  //		if err := eventLoop.Serve(lis); err != nil {
  1347  //			t.Errorf("eventLoop Serve failed, err=%s", err.Error())
  1348  //		}
  1349  //	}()
  1350  //
  1351  //	select {
  1352  //	case <-exitCh:
  1353  //		var ctx, cancel = context.WithTimeout(context.Background(), time.Second)
  1354  //		defer cancel()
  1355  //		if err := eventLoop.Shutdown(ctx); err != nil {
  1356  //			t.Errorf("netpoll server exit failed, err=%v", err)
  1357  //		}
  1358  //	}
  1359  //
  1360  //	// FIXME
  1361  //	//go func() { // Launch the misbehaving server.
  1362  //	//	sconn, err := lis.Accept()
  1363  //	//	if err != nil {
  1364  //	//		t.Errorf("Error while accepting: %v", err)
  1365  //	//		return
  1366  //	//	}
  1367  //	//	defer sconn.Close()
  1368  //	//	if _, err := io.ReadFull(sconn, make([]byte, len(ClientPreface))); err != nil {
  1369  //	//		t.Errorf("Error while reading client preface: %v", err)
  1370  //	//		return
  1371  //	//	}
  1372  //	//	sfr := http2.NewFramer(sconn.(netpoll.Connection).Writer(), sconn.(netpoll.Connection).Reader())
  1373  //	//	if err := sfr.WriteSettingsAck(); err != nil {
  1374  //	//		t.Errorf("Error while writing settings: %v", err)
  1375  //	//		return
  1376  //	//	}
  1377  //	//	var mu sync.Mutex
  1378  //	//	for {
  1379  //	//		frame, err := sfr.ReadFrame()
  1380  //	//		if err != nil {
  1381  //	//			return
  1382  //	//		}
  1383  //	//		switch frame := frame.(type) {
  1384  //	//		case *http2.HeadersFrame:
  1385  //	//			// When the client creates a stream, violate the stream flow control.
  1386  //	//			go func() {
  1387  //	//				buf := make([]byte, http2MaxFrameLen)
  1388  //	//				for {
  1389  //	//					mu.Lock()
  1390  //	//					if err := sfr.WriteData(1, false, buf); err != nil {
  1391  //	//						mu.Unlock()
  1392  //	//						return
  1393  //	//					}
  1394  //	//					mu.Unlock()
  1395  //	//					// This for loop is capable of hogging the CPU and cause starvation
  1396  //	//					// in Go versions prior to 1.9,
  1397  //	//					// in single CPU environment. Explicitly relinquish processor.
  1398  //	//					runtime.Gosched()
  1399  //	//				}
  1400  //	//			}()
  1401  //	//		case *http2.RSTStreamFrame:
  1402  //	//			if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl {
  1403  //	//				t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1404  //	//			}
  1405  //	//			close(success)
  1406  //	//			return
  1407  //	//		case *http2.PingFrame:
  1408  //	//			mu.Lock()
  1409  //	//			sfr.WritePing(true, frame.Data)
  1410  //	//			mu.Unlock()
  1411  //	//		default:
  1412  //	//		}
  1413  //	//	}
  1414  //	//}()
  1415  //	connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
  1416  //	defer cancel()
  1417  //	conn, err := netpoll.NewDialer().DialTimeout("tcp", lis.Addr().String(), time.Second)
  1418  //	if err != nil {
  1419  //		t.Fatalf("dial failed: %v", err)
  1420  //	}
  1421  //	ct, err := NewClientTransport(context.Background(), conn.(netpoll.Connection), ConnectOptions{}, "mockDestService", func(GoAwayReason) {}, func() {})
  1422  //	if err != nil {
  1423  //		t.Fatalf("Error while creating client transport: %v", err)
  1424  //	}
  1425  //	defer ct.Close()
  1426  //	str, err := ct.NewStream(connectCtx, &CallHdr{})
  1427  //	if err != nil {
  1428  //		t.Fatalf("Error while creating stream: %v", err)
  1429  //	}
  1430  //	timer := time.NewTimer(time.Second * 5)
  1431  //	go func() { // This go routine mimics the one in stream.go to call CloseStream.
  1432  //		<-str.Done()
  1433  //		ct.CloseStream(str, nil)
  1434  //	}()
  1435  //	select {
  1436  //	case <-timer.C:
  1437  //		t.Fatalf("Test timed-out.")
  1438  //	case <-success:
  1439  //	}
  1440  //}
  1441  
  1442  var encodingTestStatus = status.New(codes.Internal, "\n")
  1443  
  1444  func TestEncodingRequiredStatus(t *testing.T) {
  1445  	server, ct := setUp(t, 0, math.MaxUint32, encodingRequiredStatus)
  1446  	callHdr := &CallHdr{
  1447  		Host:   "localhost",
  1448  		Method: "foo",
  1449  	}
  1450  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1451  	defer cancel()
  1452  	s, err := ct.NewStream(ctx, callHdr)
  1453  	if err != nil {
  1454  		return
  1455  	}
  1456  	opts := Options{Last: true}
  1457  	if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != errStreamDone {
  1458  		t.Fatalf("Failed to write the request: %v", err)
  1459  	}
  1460  	p := make([]byte, http2MaxFrameLen)
  1461  	if _, err := s.trReader.(*transportReader).Read(p); err != io.EOF {
  1462  		t.Fatalf("Read got error %v, want %v", err, io.EOF)
  1463  	}
  1464  	if !testutils.StatusErrEqual(s.Status().Err(), encodingTestStatus.Err()) {
  1465  		t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus)
  1466  	}
  1467  	ct.Close()
  1468  	server.stop()
  1469  }
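
// The status above carries a raw "\n", which is not a valid HTTP/2 header
// value, so the transport must percent-encode the grpc-message trailer. A
// rough sketch of that encoding rule, assuming the gRPC convention of
// escaping '%' and non-printable ASCII (percentEncodeGRPCMessage is a
// hypothetical helper; the transport has its own implementation):
func percentEncodeGRPCMessage(msg string) string {
	var sb strings.Builder
	for i := 0; i < len(msg); i++ {
		c := msg[i]
		if c >= 0x20 && c <= 0x7e && c != '%' {
			sb.WriteByte(c) // printable ASCII passes through unchanged
			continue
		}
		fmt.Fprintf(&sb, "%%%02X", c) // everything else becomes %XX
	}
	return sb.String()
}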
  1470  
  1471  func TestInvalidHeaderField(t *testing.T) {
  1472  	server, ct := setUp(t, 0, math.MaxUint32, invalidHeaderField)
  1473  	callHdr := &CallHdr{
  1474  		Host:   "localhost",
  1475  		Method: "foo",
  1476  	}
  1477  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1478  	defer cancel()
  1479  	s, err := ct.NewStream(ctx, callHdr)
  1480  	if err != nil {
  1481  		return
  1482  	}
  1483  	p := make([]byte, http2MaxFrameLen)
  1484  	_, err = s.trReader.(*transportReader).Read(p)
  1485  	if se, ok := status.FromError(err); !ok || se.Code() != codes.Internal || !strings.Contains(err.Error(), expectedInvalidHeaderField) {
  1486  		t.Fatalf("Read got error %v, want error with code %v and contains %q", err, codes.Internal, expectedInvalidHeaderField)
  1487  	}
  1488  	ct.Close()
  1489  	server.stop()
  1490  }
  1491  
  1492  func TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) {
  1493  	server, ct := setUp(t, 0, math.MaxUint32, invalidHeaderField)
  1494  	defer server.stop()
  1495  	defer ct.Close()
  1496  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1497  	defer cancel()
  1498  	s, err := ct.NewStream(ctx, &CallHdr{Host: "localhost", Method: "foo"})
  1499  	if err != nil {
  1500  		t.Fatalf("failed to create the stream")
  1501  	}
  1502  	timer := time.NewTimer(time.Second)
  1503  	defer timer.Stop()
  1504  	select {
  1505  	case <-s.headerChan:
  1506  	case <-timer.C:
  1507  		t.Errorf("s.headerChan: got open, want closed")
  1508  	}
  1509  }
  1510  
  1511  func TestIsReservedHeader(t *testing.T) {
  1512  	tests := []struct {
  1513  		h    string
  1514  		want bool
  1515  	}{
  1516  		{"", false}, // but should be rejected earlier
  1517  		{"foo", false},
  1518  		{"content-type", true},
  1519  		{"user-agent", true},
  1520  		{":anything", true},
  1521  		{"grpc-message-type", true},
  1522  		{"grpc-encoding", true},
  1523  		{"grpc-message", true},
  1524  		{"grpc-status", true},
  1525  		{"grpc-timeout", true},
  1526  		{"te", true},
  1527  	}
  1528  	for _, tt := range tests {
  1529  		got := isReservedHeader(tt.h)
  1530  		if got != tt.want {
  1531  			t.Errorf("isReservedHeader(%q) = %v; want %v", tt.h, got, tt.want)
  1532  		}
  1533  	}
  1534  }
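
// For reference, a plausible shape of the predicate exercised above
// (hypothetical sketch, not the actual implementation): every pseudo-header
// plus the transport-owned HTTP/2 and gRPC headers is reserved for internal use.
func sketchIsReservedHeader(hdr string) bool {
	if hdr != "" && hdr[0] == ':' {
		return true // pseudo-headers such as :method, :path, :authority
	}
	switch hdr {
	case "content-type", "user-agent", "te",
		"grpc-message-type", "grpc-encoding", "grpc-message",
		"grpc-status", "grpc-timeout":
		return true
	default:
		return false
	}
}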
  1535  
  1536  func TestContextErr(t *testing.T) {
  1537  	for _, test := range []struct {
  1538  		// input
  1539  		errIn error
  1540  		// outputs
  1541  		errOut error
  1542  	}{
  1543  		{context.DeadlineExceeded, status.Err(codes.DeadlineExceeded, context.DeadlineExceeded.Error())},
  1544  		{context.Canceled, status.Err(codes.Canceled, context.Canceled.Error())},
  1545  	} {
  1546  		err := ContextErr(test.errIn)
  1547  		if err.Error() != test.errOut.Error() {
  1548  			t.Fatalf("ContextErr(%v) = %v\nwant %v", test.errIn, err, test.errOut)
  1549  		}
  1550  	}
  1551  }
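
// A minimal sketch of the mapping the table above exercises (hypothetical;
// written under the assumption that only the two context errors reach it):
func sketchContextErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Err(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Err(codes.Canceled, err.Error())
	}
	return status.Err(codes.Internal, fmt.Sprintf("unexpected error from context: %v", err))
}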
  1552  
  1553  type windowSizeConfig struct {
  1554  	serverStream uint32
  1555  	serverConn   uint32
  1556  	clientStream uint32
  1557  	clientConn   uint32
  1558  }
  1559  
  1560  // FIXME: this test currently fails.
  1561  //func TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) {
  1562  //	wc := windowSizeConfig{
  1563  //		serverStream: 10 * 1024 * 1024,
  1564  //		serverConn:   12 * 1024 * 1024,
  1565  //		clientStream: 6 * 1024 * 1024,
  1566  //		clientConn:   8 * 1024 * 1024,
  1567  //	}
  1568  //	testFlowControlAccountCheck(t, 1024*1024, wc)
  1569  //}
  1570  
  1571  func TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) {
  1572  	wc := windowSizeConfig{
  1573  		serverStream: defaultWindowSize,
  1574  		// Note this is smaller than initialConnWindowSize, which is the current default.
  1575  		serverConn:   defaultWindowSize,
  1576  		clientStream: defaultWindowSize,
  1577  		clientConn:   defaultWindowSize,
  1578  	}
  1579  	testFlowControlAccountCheck(t, 1024*1024, wc)
  1580  }
  1581  
  1582  func TestAccountCheckDynamicWindowSmallMessage(t *testing.T) {
  1583  	testFlowControlAccountCheck(t, 1024, windowSizeConfig{})
  1584  }
  1585  
  1586  func TestAccountCheckDynamicWindowLargeMessage(t *testing.T) {
  1587  	testFlowControlAccountCheck(t, 1024*1024, windowSizeConfig{})
  1588  }
  1589  
  1590  func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) {
  1591  	sc := &ServerConfig{
  1592  		InitialWindowSize:     wc.serverStream,
  1593  		InitialConnWindowSize: wc.serverConn,
  1594  	}
  1595  	co := ConnectOptions{
  1596  		InitialWindowSize:     wc.clientStream,
  1597  		InitialConnWindowSize: wc.clientConn,
  1598  	}
  1599  	server, client := setUpWithOptions(t, 0, sc, pingpong, co)
  1600  	defer server.stop()
  1601  	defer client.Close()
  1602  	waitWhileTrue(t, func() (bool, error) {
  1603  		server.mu.Lock()
  1604  		defer server.mu.Unlock()
  1605  		if len(server.conns) == 0 {
  1606  			return true, fmt.Errorf("timed out while waiting for server transport to be created")
  1607  		}
  1608  		return false, nil
  1609  	})
  1610  	var st *http2Server
  1611  	server.mu.Lock()
  1612  	for k := range server.conns {
  1613  		st = k.(*http2Server)
  1614  	}
  1615  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1616  	defer cancel()
  1617  	server.mu.Unlock()
  1618  	const numStreams = 10
  1619  	clientStreams := make([]*Stream, numStreams)
  1620  	for i := 0; i < numStreams; i++ {
  1621  		var err error
  1622  		clientStreams[i], err = client.NewStream(ctx, &CallHdr{})
  1623  		if err != nil {
  1624  			t.Fatalf("Failed to create stream. Err: %v", err)
  1625  		}
  1626  	}
  1627  	var wg sync.WaitGroup
  1628  	// For each stream send pingpong messages to the server.
  1629  	for _, stream := range clientStreams {
  1630  		wg.Add(1)
  1631  		go func(stream *Stream) {
  1632  			defer wg.Done()
  1633  			buf := make([]byte, msgSize+5)
  1634  			buf[0] = byte(0)
  1635  			binary.BigEndian.PutUint32(buf[1:], uint32(msgSize))
  1636  			opts := Options{}
  1637  			header := make([]byte, 5)
  1638  			for i := 1; i <= 10; i++ {
  1639  				if err := client.Write(stream, nil, buf, &opts); err != nil {
  1640  					t.Errorf("Error on client while writing message: %v", err)
  1641  					return
  1642  				}
  1643  				if _, err := stream.Read(header); err != nil {
  1644  					t.Errorf("Error on client while reading data frame header: %v", err)
  1645  					return
  1646  				}
  1647  				sz := binary.BigEndian.Uint32(header[1:])
  1648  				recvMsg := make([]byte, int(sz))
  1649  				if _, err := stream.Read(recvMsg); err != nil {
  1650  					t.Errorf("Error on client while reading data: %v", err)
  1651  					return
  1652  				}
  1653  				if len(recvMsg) != msgSize {
  1654  					t.Errorf("Length of message received by client: %v, want: %v", len(recvMsg), msgSize)
  1655  					return
  1656  				}
  1657  			}
  1658  		}(stream)
  1659  	}
  1660  	wg.Wait()
  1661  	serverStreams := map[uint32]*Stream{}
  1662  	loopyClientStreams := map[uint32]*outStream{}
  1663  	loopyServerStreams := map[uint32]*outStream{}
  1664  	// Collect each stream from the server's reader and writer and from the client's writer.
  1665  	st.mu.Lock()
  1666  	for _, stream := range clientStreams {
  1667  		id := stream.id
  1668  		serverStreams[id] = st.activeStreams[id]
  1669  		loopyServerStreams[id] = st.loopy.estdStreams[id]
  1670  		loopyClientStreams[id] = client.loopy.estdStreams[id]
  1672  	}
  1673  	st.mu.Unlock()
  1674  	// Close all streams
  1675  	for _, stream := range clientStreams {
  1676  		client.Write(stream, nil, nil, &Options{Last: true})
  1677  		if _, err := stream.Read(make([]byte, 5)); err != io.EOF {
  1678  			t.Fatalf("Client expected an EOF from the server. Got: %v", err)
  1679  		}
  1680  	}
  1681  	// Close down both server and client so that their internals can be read without data
  1682  	// races.
  1683  	client.Close()
  1684  	st.Close()
  1685  	<-st.readerDone
  1686  	<-st.writerDone
  1687  	<-client.readerDone
  1688  	<-client.writerDone
  1689  	for _, cstream := range clientStreams {
  1690  		id := cstream.id
  1691  		sstream := serverStreams[id]
  1692  		loopyServerStream := loopyServerStreams[id]
  1693  		loopyClientStream := loopyClientStreams[id]
  1694  		// Check stream flow control.
  1695  		if int(cstream.fc.limit+cstream.fc.delta-cstream.fc.pendingData-cstream.fc.pendingUpdate) != int(st.loopy.oiws)-loopyServerStream.bytesOutStanding {
  1696  			t.Fatalf("Account mismatch: client stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != server outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", cstream.fc.limit, cstream.fc.delta, cstream.fc.pendingData, cstream.fc.pendingUpdate, st.loopy.oiws, loopyServerStream.bytesOutStanding)
  1697  		}
  1698  		if int(sstream.fc.limit+sstream.fc.delta-sstream.fc.pendingData-sstream.fc.pendingUpdate) != int(client.loopy.oiws)-loopyClientStream.bytesOutStanding {
  1699  			t.Fatalf("Account mismatch: server stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != client outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", sstream.fc.limit, sstream.fc.delta, sstream.fc.pendingData, sstream.fc.pendingUpdate, client.loopy.oiws, loopyClientStream.bytesOutStanding)
  1700  		}
  1701  	}
  1702  	// Check transport flow control.
  1703  	if client.fc.limit != client.fc.unacked+st.loopy.sendQuota {
  1704  		t.Fatalf("Account mismatch: client transport inflow(%d) != client unacked(%d) + server sendQuota(%d)", client.fc.limit, client.fc.unacked, st.loopy.sendQuota)
  1705  	}
  1706  	if st.fc.limit != st.fc.unacked+client.loopy.sendQuota {
  1707  		t.Fatalf("Account mismatch: server transport inflow(%d) != server unacked(%d) + client sendQuota(%d)", st.fc.limit, st.fc.unacked, client.loopy.sendQuota)
  1708  	}
  1709  }
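
// Both per-stream Fatalf checks above assert the same conservation law from
// the two directions of the connection: the window a receiver still advertises
// must equal the credit its peer believes it has left. A hedged restatement as
// a helper (hypothetical; the parameter names mirror the fields used above):
func sketchWindowsAgree(limit, delta, pendingData, pendingUpdate, oiws uint32, bytesOutStanding int) bool {
	receiverView := int(limit + delta - pendingData - pendingUpdate) // bytes the receiver will still accept
	senderView := int(oiws) - bytesOutStanding                       // bytes the sender thinks it may still send
	return receiverView == senderView
}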
  1710  
  1711  func waitWhileTrue(t *testing.T, condition func() (bool, error)) {
  1712  	var (
  1713  		wait bool
  1714  		err  error
  1715  	)
  1716  	timer := time.NewTimer(time.Second * 5)
  1717  	for {
  1718  		wait, err = condition()
  1719  		if wait {
  1720  			select {
  1721  			case <-timer.C:
  1722  				t.Fatal(err)
  1723  			default:
  1724  				time.Sleep(50 * time.Millisecond)
  1725  				continue
  1726  			}
  1727  		}
  1728  		if !timer.Stop() {
  1729  			<-timer.C
  1730  		}
  1731  		break
  1732  	}
  1733  }
  1734  
  1735  // If any error occurs on a call to Stream.Read, future calls
  1736  // should continue to return that same error.
  1737  func TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) {
  1738  	testRecvBuffer := newRecvBuffer()
  1739  	s := &Stream{
  1740  		ctx:         context.Background(),
  1741  		buf:         testRecvBuffer,
  1742  		requestRead: func(int) {},
  1743  	}
  1744  	s.trReader = &transportReader{
  1745  		reader: &recvBufferReader{
  1746  			ctx:        s.ctx,
  1747  			ctxDone:    s.ctx.Done(),
  1748  			recv:       s.buf,
  1749  			freeBuffer: func(*bytes.Buffer) {},
  1750  		},
  1751  		windowHandler: func(int) {},
  1752  	}
  1753  	testData := make([]byte, 1)
  1754  	testData[0] = 5
  1755  	testBuffer := bytes.NewBuffer(testData)
  1756  	testErr := errors.New("test error")
  1757  	s.write(recvMsg{buffer: testBuffer, err: testErr})
  1758  
  1759  	inBuf := make([]byte, 1)
  1760  	actualCount, actualErr := s.Read(inBuf)
  1761  	if actualCount != 0 {
  1762  		t.Errorf("actualCount, _ := s.Read(_) differs; want 0; got %v", actualCount)
  1763  	}
  1764  	if actualErr.Error() != testErr.Error() {
  1765  		t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error())
  1766  	}
  1767  
  1768  	s.write(recvMsg{buffer: testBuffer, err: nil})
  1769  	s.write(recvMsg{buffer: testBuffer, err: errors.New("different error from first")})
  1770  
  1771  	for i := 0; i < 2; i++ {
  1772  		inBuf := make([]byte, 1)
  1773  		actualCount, actualErr := s.Read(inBuf)
  1774  		if actualCount != 0 {
  1775  			t.Errorf("actualCount, _ := s.Read(_) differs; want %v; got %v", 0, actualCount)
  1776  		}
  1777  		if actualErr.Error() != testErr.Error() {
  1778  			t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error())
  1779  		}
  1780  	}
  1781  }
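
// The behavior verified above comes from the reader latching its first error.
// A minimal sketch of the pattern (hypothetical; the real transportReader
// keeps the error in a field and short-circuits every subsequent Read):
type sketchStickyReader struct {
	r   io.Reader
	err error
}

func (s *sketchStickyReader) Read(p []byte) (int, error) {
	if s.err != nil {
		return 0, s.err // every later Read repeats the first failure
	}
	n, err := s.r.Read(p)
	if err != nil {
		s.err = err
	}
	return n, err
}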
  1782  
  1783  // FIXME: this test currently fails.
  1784  // The gRPC over HTTP/2 specification requires the :method header of a request to be POST.
  1785  // If a client sends any other value, the server should close the stream.
  1786  //func TestServerWithClientSendingWrongMethod(t *testing.T) {
  1787  //	server := setUpServerOnly(t, 0, &ServerConfig{}, suspended)
  1788  //	defer server.stop()
  1789  //	// Dial a raw connection directly so that what we can send is not coupled to the API of http2_client.go.
  1790  //	mconn, err := netpoll.NewDialer().DialTimeout("tcp", server.lis.Addr().String(), time.Second)
  1791  //	if err != nil {
  1792  //		t.Fatalf("Client failed to dial: %v", err)
  1793  //	}
  1794  //	defer mconn.Close()
  1795  //
  1796  //	if n, err := mconn.Write(ClientPreface); err != nil || n != len(ClientPreface) {
  1797  //		t.Fatalf("mconn.Write(ClientPreface) = %d, %v, want %d, <nil>", n, err, len(ClientPreface))
  1798  //	}
  1799  //
  1800  //	framer := http2.NewFramer(mconn, mconn)
  1801  //	if err := framer.WriteSettings(); err != nil {
  1802  //		t.Fatalf("Error while writing settings: %v", err)
  1803  //	}
  1804  //
  1805  //	// The success chan indicates that the reader received an RSTStream from the server.
  1806  //	// An error will be passed on it if any other frame is received.
  1807  //	success := testutils.NewChannel()
  1808  //
  1809  //	// Launch a reader goroutine.
  1810  //	go func() {
  1811  //		for {
  1812  //			frame, err := framer.ReadFrame()
  1813  //			if err != nil {
  1814  //				return
  1815  //			}
  1816  //			switch frame := frame.(type) {
  1817  //			case *grpcframe.SettingsFrame:
  1818  //			// Do nothing. A settings frame is expected as part of the server preface.
  1819  //			case *http2.RSTStreamFrame:
  1820  //				if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeProtocol {
  1821  //					// Client only created a single stream, so RST Stream should be for that single stream.
  1822  //					t.Errorf("RST stream received with streamID: %d and code %v, want streamID: 1 and code: http2.ErrCodeProtocol", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))
  1823  //				}
  1824  //				// Records that client successfully received RST Stream frame.
  1825  //				success.Send(nil)
  1826  //				return
  1827  //			default:
  1828  //				// The server should send nothing but a single RST Stream frame.
  1829  //				success.Send(errors.New("The client received a frame other than RST Stream"))
  1830  //			}
  1831  //		}
  1832  //	}()
  1833  //
  1834  //	// Done with HTTP/2 setup - now create a stream with a bad method header.
  1835  //	var buf bytes.Buffer
  1836  //	henc := hpack.NewEncoder(&buf)
  1837  //	// Method is required to be POST in a gRPC call.
  1838  //	if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "PUT"}); err != nil {
  1839  //		t.Fatalf("Error while encoding header: %v", err)
  1840  //	}
  1841  //	// Have the rest of the headers be ok and within the gRPC over HTTP/2 spec.
  1842  //	if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil {
  1843  //		t.Fatalf("Error while encoding header: %v", err)
  1844  //	}
  1845  //	if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil {
  1846  //		t.Fatalf("Error while encoding header: %v", err)
  1847  //	}
  1848  //	if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil {
  1849  //		t.Fatalf("Error while encoding header: %v", err)
  1850  //	}
  1851  //
  1852  //	if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil {
  1853  //		t.Fatalf("Error while writing headers: %v", err)
  1854  //	}
  1855  //	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1856  //	defer cancel()
  1857  //	if e, err := success.Receive(ctx); e != nil || err != nil {
  1858  //		t.Fatalf("Error in frame server should send: %v. Error receiving from channel: %v", e, err)
  1859  //	}
  1860  //}
  1861  
  1862  func TestPingPong1B(t *testing.T) {
  1863  	runPingPongTest(t, 1)
  1864  }
  1865  
  1866  func TestPingPong1KB(t *testing.T) {
  1867  	runPingPongTest(t, 1024)
  1868  }
  1869  
  1870  func TestPingPong64KB(t *testing.T) {
  1871  	runPingPongTest(t, 65536)
  1872  }
  1873  
  1874  func TestPingPong1MB(t *testing.T) {
  1875  	runPingPongTest(t, 1048576)
  1876  }
  1877  
  1878  // This is a stress-test of flow control logic.
  1879  func runPingPongTest(t *testing.T, msgSize int) {
  1880  	server, client := setUp(t, 0, 0, pingpong)
  1881  	defer server.stop()
  1882  	defer client.Close()
  1883  	waitWhileTrue(t, func() (bool, error) {
  1884  		server.mu.Lock()
  1885  		defer server.mu.Unlock()
  1886  		if len(server.conns) == 0 {
  1887  			return true, fmt.Errorf("timed out while waiting for server transport to be created")
  1888  		}
  1889  		return false, nil
  1890  	})
  1891  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1892  	defer cancel()
  1893  	stream, err := client.NewStream(ctx, &CallHdr{})
  1894  	if err != nil {
  1895  		t.Fatalf("Failed to create stream. Err: %v", err)
  1896  	}
  1897  	msg := make([]byte, msgSize)
  1898  	outgoingHeader := make([]byte, 5)
  1899  	outgoingHeader[0] = byte(0)
  1900  	binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(msgSize))
  1901  	opts := &Options{}
  1902  	incomingHeader := make([]byte, 5)
  1903  	done := make(chan struct{})
  1904  	go func() {
  1905  		timer := time.NewTimer(time.Second * 1)
  1906  		<-timer.C
  1907  		close(done)
  1908  	}()
  1909  	for {
  1910  		select {
  1911  		case <-done:
  1912  			client.Write(stream, nil, nil, &Options{Last: true})
  1913  			if _, err := stream.Read(incomingHeader); err != io.EOF {
  1914  				t.Fatalf("Client expected EOF from the server. Got: %v", err)
  1915  			}
  1916  			return
  1917  		default:
  1918  			if err := client.Write(stream, outgoingHeader, msg, opts); err != nil {
  1919  				t.Fatalf("Error on client while writing message. Err: %v", err)
  1920  			}
  1921  			if _, err := stream.Read(incomingHeader); err != nil {
  1922  				t.Fatalf("Error on client while reading data header. Err: %v", err)
  1923  			}
  1924  			sz := binary.BigEndian.Uint32(incomingHeader[1:])
  1925  			recvMsg := make([]byte, int(sz))
  1926  			if _, err := stream.Read(recvMsg); err != nil {
  1927  				t.Fatalf("Error on client while reading data. Err: %v", err)
  1928  			}
  1929  		}
  1930  	}
  1931  }
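
// The 5-byte headers built by hand above follow the gRPC message framing: one
// compression-flag byte followed by a 4-byte big-endian payload length.
// Hypothetical helpers making that layout explicit (mirroring the manual
// binary.BigEndian calls in the tests, not taken from the transport itself):
func sketchEncodeMsgHeader(compressed bool, length uint32) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], length)
	return hdr
}

func sketchDecodeMsgHeader(hdr []byte) (compressed bool, length uint32) {
	return hdr[0] == 1, binary.BigEndian.Uint32(hdr[1:])
}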
  1932  
  1933  type tableSizeLimit struct {
  1934  	mu     sync.Mutex
  1935  	limits []uint32
  1936  }
  1937  
  1938  func (t *tableSizeLimit) add(limit uint32) {
  1939  	t.mu.Lock()
  1940  	t.limits = append(t.limits, limit)
  1941  	t.mu.Unlock()
  1942  }
  1943  
  1944  func (t *tableSizeLimit) getLen() int {
  1945  	t.mu.Lock()
  1946  	defer t.mu.Unlock()
  1947  	return len(t.limits)
  1948  }
  1949  
  1950  func (t *tableSizeLimit) getIndex(i int) uint32 {
  1951  	t.mu.Lock()
  1952  	defer t.mu.Unlock()
  1953  	return t.limits[i]
  1954  }
  1955  
  1956  func TestHeaderTblSize(t *testing.T) {
  1957  	limits := &tableSizeLimit{}
  1958  	updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
  1959  		e.SetMaxDynamicTableSizeLimit(v)
  1960  		limits.add(v)
  1961  	}
  1962  	defer func() {
  1963  		updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
  1964  			e.SetMaxDynamicTableSizeLimit(v)
  1965  		}
  1966  	}()
  1967  
  1968  	server, ct := setUp(t, 0, math.MaxUint32, normal)
  1969  	defer ct.Close()
  1970  	defer server.stop()
  1971  	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
  1972  	defer ctxCancel()
  1973  	_, err := ct.NewStream(ctx, &CallHdr{})
  1974  	if err != nil {
  1975  		t.Fatalf("failed to open stream: %v", err)
  1976  	}
  1977  
  1978  	var svrTransport ServerTransport
  1979  	var i int
  1980  	for i = 0; i < 1000; i++ {
  1981  		server.mu.Lock()
  1982  		if len(server.conns) != 0 {
  1983  			server.mu.Unlock()
  1984  			break
  1985  		}
  1986  		server.mu.Unlock()
  1987  		time.Sleep(10 * time.Millisecond)
  1988  		continue
  1989  	}
  1990  	if i == 1000 {
  1991  		t.Fatalf("unable to create any server transport after 10s")
  1992  	}
  1993  
  1994  	for st := range server.conns {
  1995  		svrTransport = st
  1996  		break
  1997  	}
  1998  	svrTransport.(*http2Server).controlBuf.put(&outgoingSettings{
  1999  		ss: []http2.Setting{
  2000  			{
  2001  				ID:  http2.SettingHeaderTableSize,
  2002  				Val: uint32(100),
  2003  			},
  2004  		},
  2005  	})
  2006  
  2007  	for i = 0; i < 1000; i++ {
  2008  		if limits.getLen() != 1 {
  2009  			time.Sleep(10 * time.Millisecond)
  2010  			continue
  2011  		}
  2012  		if val := limits.getIndex(0); val != uint32(100) {
  2013  			t.Fatalf("expected limits[0] = 100, got %d", val)
  2014  		}
  2015  		break
  2016  	}
  2017  	if i == 1000 {
  2018  		t.Fatalf("expected len(limits) = 1 within 10s, got != 1")
  2019  	}
  2020  
  2021  	ct.controlBuf.put(&outgoingSettings{
  2022  		ss: []http2.Setting{
  2023  			{
  2024  				ID:  http2.SettingHeaderTableSize,
  2025  				Val: uint32(200),
  2026  			},
  2027  		},
  2028  	})
  2029  
  2030  	for i = 0; i < 1000; i++ { // reuse the outer i so the timeout check below observes it
  2031  		if limits.getLen() != 2 {
  2032  			time.Sleep(10 * time.Millisecond)
  2033  			continue
  2034  		}
  2035  		if val := limits.getIndex(1); val != uint32(200) {
  2036  			t.Fatalf("expected limits[1] = 200, got %d", val)
  2037  		}
  2038  		break
  2039  	}
  2040  	if i == 1000 {
  2041  		t.Fatalf("expected len(limits) = 2 within 10s, got != 2")
  2042  	}
  2043  }
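
// The test drives a SETTINGS_HEADER_TABLE_SIZE update end to end through the
// control buffer. Locally, the effect on the HPACK encoder amounts to the
// following (sketch under the assumption that the peer's settings arrive as a
// []http2.Setting; the encoder then caps its dynamic table at that value):
func sketchApplyHeaderTblSize(e *hpack.Encoder, settings []http2.Setting) {
	for _, s := range settings {
		if s.ID == http2.SettingHeaderTableSize {
			e.SetMaxDynamicTableSizeLimit(s.Val)
		}
	}
}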
  2044  
  2045  func TestTLSConfig(t *testing.T) {
  2046  	cfg := &tls.Config{}
  2047  	newCfg := TLSConfig(cfg)
  2048  	test.Assert(t, len(cfg.NextProtos) == 0)
  2049  	test.Assert(t, len(newCfg.NextProtos) == 1)
  2050  	test.Assert(t, newCfg.NextProtos[0] == alpnProtoStrH2)
  2051  	test.Assert(t, newCfg.MinVersion == tls.VersionTLS12)
  2052  }
  2053  
  2054  func TestTlsAppendH2ToALPNProtocols(t *testing.T) {
  2055  	var ps []string
  2056  	appended := tlsAppendH2ToALPNProtocols(ps)
  2057  	test.Assert(t, len(appended) == 1)
  2058  	test.Assert(t, appended[0] == alpnProtoStrH2)
  2059  	appended = tlsAppendH2ToALPNProtocols(appended)
  2060  	test.Assert(t, len(appended) == 1)
  2061  }