github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/binarylog/binarylog_end2end_test.go

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog_test

import (
	"context"
	"fmt"
	"io"
	"net"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	grpc "github.com/hxx258456/ccgo/grpc"
	"github.com/hxx258456/ccgo/grpc/binarylog"
	"github.com/hxx258456/ccgo/grpc/grpclog"
	iblog "github.com/hxx258456/ccgo/grpc/internal/binarylog"
	"github.com/hxx258456/ccgo/grpc/internal/grpctest"
	"github.com/hxx258456/ccgo/grpc/metadata"
	"github.com/hxx258456/ccgo/grpc/status"

	pb "github.com/hxx258456/ccgo/grpc/binarylog/grpc_binarylog_v1"
	testgrpc "github.com/hxx258456/ccgo/grpc/interop/grpc_testing"
	testpb "github.com/hxx258456/ccgo/grpc/interop/grpc_testing"
)

var grpclogLogger = grpclog.Component("binarylog")

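// s is the suite type all tests in this file hang off of; grpctest.RunSubTests
// discovers the Test* methods declared on s and runs each as a subtest with
// grpctest's setup/teardown (e.g. leak checking) around it.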
type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

func init() {
	// Setting the environment variable in tests doesn't work because of init
	// ordering, so set the loggers directly here instead.
	iblog.SetLogger(iblog.AllLogger)
	binarylog.SetSink(testSink)
}

var testSink = &testBinLogSink{}

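// testBinLogSink is an in-memory binarylog sink that records every log entry
// written to it, so tests can assert on the exact sequence of entries an RPC
// produced.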
type testBinLogSink struct {
	mu  sync.Mutex
	buf []*pb.GrpcLogEntry
}

func (s *testBinLogSink) Write(e *pb.GrpcLogEntry) error {
	s.mu.Lock()
	s.buf = append(s.buf, e)
	s.mu.Unlock()
	return nil
}

func (s *testBinLogSink) Close() error { return nil }

// logEntries returns all client entries if client is true, otherwise it
// returns all server entries.
func (s *testBinLogSink) logEntries(client bool) []*pb.GrpcLogEntry {
	logger := pb.GrpcLogEntry_LOGGER_SERVER
	if client {
		logger = pb.GrpcLogEntry_LOGGER_CLIENT
	}
	var ret []*pb.GrpcLogEntry
	s.mu.Lock()
	for _, e := range s.buf {
		if e.Logger == logger {
			ret = append(ret, e)
		}
	}
	s.mu.Unlock()
	return ret
}

func (s *testBinLogSink) clear() {
	s.mu.Lock()
	s.buf = nil
	s.mu.Unlock()
}

var (
	// For headers:
	testMetadata = metadata.MD{
		"key1": []string{"value1"},
		"key2": []string{"value2"},
	}
	// For trailers:
	testTrailerMetadata = metadata.MD{
		"tkey1": []string{"trailerValue1"},
		"tkey2": []string{"trailerValue2"},
	}
	// The ID for which the service handler should return an error.
	errorID int32 = 32202

	globalRPCID uint64 // RPC IDs start at 1, but we do ++ at the beginning of each test.
)

func idToPayload(id int32) *testpb.Payload {
	return &testpb.Payload{Body: []byte{byte(id), byte(id >> 8), byte(id >> 16), byte(id >> 24)}}
}

func payloadToID(p *testpb.Payload) int32 {
	if p == nil || len(p.Body) != 4 {
		panic("invalid payload")
	}
	return int32(p.Body[0]) + int32(p.Body[1])<<8 + int32(p.Body[2])<<16 + int32(p.Body[3])<<24
}
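
// idToPayload and payloadToID above encode an int32 ID as the payload body in
// little-endian byte order and decode it back. A minimal sketch (not part of
// the original suite) illustrating the round trip:
func (s) TestPayloadIDRoundTrip(t *testing.T) {
	const id int32 = 0x01020304
	p := idToPayload(id) // p.Body == []byte{0x04, 0x03, 0x02, 0x01}
	if got := payloadToID(p); got != id {
		t.Fatalf("payloadToID(idToPayload(%#x)) = %#x, want %#x", id, got, id)
	}
}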

type testServer struct {
	testgrpc.UnimplementedTestServiceServer
	te *test
}

func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		if err := grpc.SendHeader(ctx, md); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
		}
		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
		}
	}

	if id := payloadToID(in.Payload); id == errorID {
		return nil, fmt.Errorf("got error id: %v", id)
	}

	return &testpb.SimpleResponse{Payload: in.Payload}, nil
}

func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			return err
		}

		if id := payloadToID(in.Payload); id == errorID {
			return fmt.Errorf("got error id: %v", id)
		}

		if err := stream.Send(&testpb.StreamingOutputCallResponse{Payload: in.Payload}); err != nil {
			return err
		}
	}
}

func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return stream.SendAndClose(&testpb.StreamingInputCallResponse{AggregatedPayloadSize: 0})
		}
		if err != nil {
			return err
		}

		if id := payloadToID(in.Payload); id == errorID {
			return fmt.Errorf("got error id: %v", id)
		}
	}
}

func (s *testServer) StreamingOutputCall(in *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}

	if id := payloadToID(in.Payload); id == errorID {
		return fmt.Errorf("got error id: %v", id)
	}

	for i := 0; i < 5; i++ {
		if err := stream.Send(&testpb.StreamingOutputCallResponse{Payload: in.Payload}); err != nil {
			return err
		}
	}
	return nil
}

// test is an end-to-end test. It should be created with the newTest
// func, modified as needed, and then started with its startServer method.
// It should be cleaned up with the tearDown method.
type test struct {
	t *testing.T

	testService testgrpc.TestServiceServer // nil means none
	// srv and srvAddr are set once startServer is called.
	srv     *grpc.Server
	srvAddr string // Server address in host:port form.
	srvIP   net.IP
	srvPort int

	cc *grpc.ClientConn // nil until requested via clientConn

	// Fields for the client address, set by the wrapped listener on Accept.
	clientAddrMu sync.Mutex
	clientIP     net.IP
	clientPort   int
}

func (te *test) tearDown() {
	if te.cc != nil {
		te.cc.Close()
		te.cc = nil
	}
	te.srv.Stop()
}

// newTest returns a new test using the provided testing.T. It is returned
// with default values; tests should modify it as needed before calling its
// startServer and clientConn methods.
func newTest(t *testing.T) *test {
	te := &test{
		t: t,
	}
	return te
}

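// listenerWrapper wraps the server's listener so the test can capture the
// client's address on Accept; the expected server-side log entries need it
// to populate their peer field.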
type listenerWrapper struct {
	net.Listener
	te *test
}

func (lw *listenerWrapper) Accept() (net.Conn, error) {
	conn, err := lw.Listener.Accept()
	if err != nil {
		return nil, err
	}
	lw.te.clientAddrMu.Lock()
	lw.te.clientIP = conn.RemoteAddr().(*net.TCPAddr).IP
	lw.te.clientPort = conn.RemoteAddr().(*net.TCPAddr).Port
	lw.te.clientAddrMu.Unlock()
	return conn, nil
}

// startServer starts a gRPC server listening. Callers should defer a
// call to te.tearDown to clean up.
func (te *test) startServer(ts testgrpc.TestServiceServer) {
	te.testService = ts
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		te.t.Fatalf("Failed to listen: %v", err)
	}

	lis = &listenerWrapper{
		Listener: lis,
		te:       te,
	}

	var opts []grpc.ServerOption
	s := grpc.NewServer(opts...)
	te.srv = s
	if te.testService != nil {
		testgrpc.RegisterTestServiceServer(s, te.testService)
	}

	go s.Serve(lis)
	te.srvAddr = lis.Addr().String()
	te.srvIP = lis.Addr().(*net.TCPAddr).IP
	te.srvPort = lis.Addr().(*net.TCPAddr).Port
}

func (te *test) clientConn() *grpc.ClientConn {
	if te.cc != nil {
		return te.cc
	}
	opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()}

	var err error
	te.cc, err = grpc.Dial(te.srvAddr, opts...)
	if err != nil {
		te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err)
	}
	return te.cc
}

type rpcType int

const (
	unaryRPC rpcType = iota
	clientStreamRPC
	serverStreamRPC
	fullDuplexStreamRPC
	cancelRPC
)

type rpcConfig struct {
	count    int     // Number of requests and responses for streaming RPCs.
	success  bool    // Whether the RPC should succeed or return an error.
	callType rpcType // Type of RPC.
}
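
// For example, &rpcConfig{count: 5, success: true, callType: clientStreamRPC}
// describes a client-streaming RPC that sends five requests and expects the
// call to succeed.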

func (te *test) doUnaryCall(c *rpcConfig) (*testpb.SimpleRequest, *testpb.SimpleResponse, error) {
	var (
		resp *testpb.SimpleResponse
		req  *testpb.SimpleRequest
		err  error
	)
	tc := testgrpc.NewTestServiceClient(te.clientConn())
	if c.success {
		req = &testpb.SimpleRequest{Payload: idToPayload(errorID + 1)}
	} else {
		req = &testpb.SimpleRequest{Payload: idToPayload(errorID)}
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)

	resp, err = tc.UnaryCall(ctx, req)
	return req, resp, err
}

func (te *test) doFullDuplexCallRoundtrip(c *rpcConfig) ([]proto.Message, []proto.Message, error) {
	var (
		reqs  []proto.Message
		resps []proto.Message
		err   error
	)
	tc := testgrpc.NewTestServiceClient(te.clientConn())
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)

	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		return reqs, resps, err
	}

	if c.callType == cancelRPC {
		cancel()
		return reqs, resps, context.Canceled
	}

	var startID int32
	if !c.success {
		startID = errorID
	}
	for i := 0; i < c.count; i++ {
		req := &testpb.StreamingOutputCallRequest{
			Payload: idToPayload(int32(i) + startID),
		}
		reqs = append(reqs, req)
		if err = stream.Send(req); err != nil {
			return reqs, resps, err
		}
		var resp *testpb.StreamingOutputCallResponse
		if resp, err = stream.Recv(); err != nil {
			return reqs, resps, err
		}
		resps = append(resps, resp)
	}
	if err = stream.CloseSend(); err != nil && err != io.EOF {
		return reqs, resps, err
	}
	if _, err = stream.Recv(); err != io.EOF {
		return reqs, resps, err
	}

	return reqs, resps, nil
}

func (te *test) doClientStreamCall(c *rpcConfig) ([]proto.Message, proto.Message, error) {
	var (
		reqs []proto.Message
		resp *testpb.StreamingInputCallResponse
		err  error
	)
	tc := testgrpc.NewTestServiceClient(te.clientConn())
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)

	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		return reqs, resp, err
	}
	var startID int32
	if !c.success {
		startID = errorID
	}
	for i := 0; i < c.count; i++ {
		req := &testpb.StreamingInputCallRequest{
			Payload: idToPayload(int32(i) + startID),
		}
		reqs = append(reqs, req)
		if err = stream.Send(req); err != nil {
			return reqs, resp, err
		}
	}
	resp, err = stream.CloseAndRecv()
	return reqs, resp, err
}

func (te *test) doServerStreamCall(c *rpcConfig) (proto.Message, []proto.Message, error) {
	var (
		req   *testpb.StreamingOutputCallRequest
		resps []proto.Message
		err   error
	)

	tc := testgrpc.NewTestServiceClient(te.clientConn())
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)

	var startID int32
	if !c.success {
		startID = errorID
	}
	req = &testpb.StreamingOutputCallRequest{Payload: idToPayload(startID)}
	stream, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		return req, resps, err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return req, resps, nil
		} else if err != nil {
			return req, resps, err
		}
		resps = append(resps, resp)
	}
}

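// expectedData carries everything needed to reconstruct the log entries an
// RPC should have produced: the test fixture (for addresses), the RPC config,
// the method name, and the actual requests, responses, and final error.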
type expectedData struct {
	te *test
	cc *rpcConfig

	method    string
	requests  []proto.Message
	responses []proto.Message
	err       error
}

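// newClientHeaderEntry builds the expected CLIENT_HEADER entry. The peer
// address is populated only on the server side, which learns the client's
// address via the wrapped listener's Accept.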
func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry {
	logger := pb.GrpcLogEntry_LOGGER_CLIENT
	var peer *pb.Address
	if !client {
		logger = pb.GrpcLogEntry_LOGGER_SERVER
		ed.te.clientAddrMu.Lock()
		peer = &pb.Address{
			Address: ed.te.clientIP.String(),
			IpPort:  uint32(ed.te.clientPort),
		}
		if ed.te.clientIP.To4() != nil {
			peer.Type = pb.Address_TYPE_IPV4
		} else {
			peer.Type = pb.Address_TYPE_IPV6
		}
		ed.te.clientAddrMu.Unlock()
	}
	return &pb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               logger,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: &pb.ClientHeader{
				Metadata:   iblog.MdToMetadataProto(testMetadata),
				MethodName: ed.method,
				Authority:  ed.te.srvAddr,
			},
		},
		Peer: peer,
	}
}

func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry {
	logger := pb.GrpcLogEntry_LOGGER_SERVER
	var peer *pb.Address
	if client {
		logger = pb.GrpcLogEntry_LOGGER_CLIENT
		peer = &pb.Address{
			Address: ed.te.srvIP.String(),
			IpPort:  uint32(ed.te.srvPort),
		}
		if ed.te.srvIP.To4() != nil {
			peer.Type = pb.Address_TYPE_IPV4
		} else {
			peer.Type = pb.Address_TYPE_IPV6
		}
	}
	return &pb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Logger:               logger,
		Payload: &pb.GrpcLogEntry_ServerHeader{
			ServerHeader: &pb.ServerHeader{
				Metadata: iblog.MdToMetadataProto(testMetadata),
			},
		},
		Peer: peer,
	}
}

   546  
   547  func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *pb.GrpcLogEntry {
   548  	logger := pb.GrpcLogEntry_LOGGER_CLIENT
   549  	if !client {
   550  		logger = pb.GrpcLogEntry_LOGGER_SERVER
   551  	}
   552  	data, err := proto.Marshal(msg)
   553  	if err != nil {
   554  		grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
   555  	}
   556  	return &pb.GrpcLogEntry{
   557  		Timestamp:            nil,
   558  		CallId:               rpcID,
   559  		SequenceIdWithinCall: inRPCID,
   560  		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
   561  		Logger:               logger,
   562  		Payload: &pb.GrpcLogEntry_Message{
   563  			Message: &pb.Message{
   564  				Length: uint32(len(data)),
   565  				Data:   data,
   566  			},
   567  		},
   568  	}
   569  }
   570  
   571  func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *pb.GrpcLogEntry {
   572  	logger := pb.GrpcLogEntry_LOGGER_CLIENT
   573  	if !client {
   574  		logger = pb.GrpcLogEntry_LOGGER_SERVER
   575  	}
   576  	data, err := proto.Marshal(msg)
   577  	if err != nil {
   578  		grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
   579  	}
   580  	return &pb.GrpcLogEntry{
   581  		Timestamp:            nil,
   582  		CallId:               rpcID,
   583  		SequenceIdWithinCall: inRPCID,
   584  		Type:                 pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
   585  		Logger:               logger,
   586  		Payload: &pb.GrpcLogEntry_Message{
   587  			Message: &pb.Message{
   588  				Length: uint32(len(data)),
   589  				Data:   data,
   590  			},
   591  		},
   592  	}
   593  }
   594  
   595  func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry {
   596  	logger := pb.GrpcLogEntry_LOGGER_CLIENT
   597  	if !client {
   598  		logger = pb.GrpcLogEntry_LOGGER_SERVER
   599  	}
   600  	return &pb.GrpcLogEntry{
   601  		Timestamp:            nil,
   602  		CallId:               rpcID,
   603  		SequenceIdWithinCall: inRPCID,
   604  		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
   605  		Payload:              nil, // No payload here.
   606  		Logger:               logger,
   607  	}
   608  }
   609  
   610  func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *pb.GrpcLogEntry {
   611  	logger := pb.GrpcLogEntry_LOGGER_SERVER
   612  	var peer *pb.Address
   613  	if client {
   614  		logger = pb.GrpcLogEntry_LOGGER_CLIENT
   615  		peer = &pb.Address{
   616  			Address: ed.te.srvIP.String(),
   617  			IpPort:  uint32(ed.te.srvPort),
   618  		}
   619  		if ed.te.srvIP.To4() != nil {
   620  			peer.Type = pb.Address_TYPE_IPV4
   621  		} else {
   622  			peer.Type = pb.Address_TYPE_IPV6
   623  		}
   624  	}
   625  	st, ok := status.FromError(stErr)
   626  	if !ok {
   627  		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
   628  	}
   629  	stProto := st.Proto()
   630  	var (
   631  		detailsBytes []byte
   632  		err          error
   633  	)
   634  	if stProto != nil && len(stProto.Details) != 0 {
   635  		detailsBytes, err = proto.Marshal(stProto)
   636  		if err != nil {
   637  			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
   638  		}
   639  	}
   640  	return &pb.GrpcLogEntry{
   641  		Timestamp:            nil,
   642  		CallId:               rpcID,
   643  		SequenceIdWithinCall: inRPCID,
   644  		Type:                 pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
   645  		Logger:               logger,
   646  		Payload: &pb.GrpcLogEntry_Trailer{
   647  			Trailer: &pb.Trailer{
   648  				Metadata: iblog.MdToMetadataProto(testTrailerMetadata),
   649  				// st will be nil if err was not a status error, but nil is ok.
   650  				StatusCode:    uint32(st.Code()),
   651  				StatusMessage: st.Message(),
   652  				StatusDetails: detailsBytes,
   653  			},
   654  		},
   655  		Peer: peer,
   656  	}
   657  }
   658  
   659  func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *pb.GrpcLogEntry {
   660  	return &pb.GrpcLogEntry{
   661  		Timestamp:            nil,
   662  		CallId:               rpcID,
   663  		SequenceIdWithinCall: inRPCID,
   664  		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
   665  		Logger:               pb.GrpcLogEntry_LOGGER_CLIENT,
   666  		Payload:              nil,
   667  	}
   668  }
   669  
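// toClientLogEntries builds the sequence of entries the client-side binary
// logger is expected to emit for the configured RPC. For a successful unary
// RPC, for instance, that sequence is: CLIENT_HEADER, CLIENT_MESSAGE,
// SERVER_HEADER, SERVER_MESSAGE, SERVER_TRAILER.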
func (ed *expectedData) toClientLogEntries() []*pb.GrpcLogEntry {
	var (
		ret     []*pb.GrpcLogEntry
		idInRPC uint64 = 1
	)
	ret = append(ret, ed.newClientHeaderEntry(true, globalRPCID, idInRPC))
	idInRPC++

	switch ed.cc.callType {
	case unaryRPC, fullDuplexStreamRPC:
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(true, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
			if i == 0 {
				// First message, append ServerHeader.
				ret = append(ret, ed.newServerHeaderEntry(true, globalRPCID, idInRPC))
				idInRPC++
			}
			if !ed.cc.success {
				// There is no response in the RPC error case.
				continue
			}
			ret = append(ret, ed.newServerMessageEntry(true, globalRPCID, idInRPC, ed.responses[i]))
			idInRPC++
		}
		if ed.cc.success && ed.cc.callType == fullDuplexStreamRPC {
			ret = append(ret, ed.newHalfCloseEntry(true, globalRPCID, idInRPC))
			idInRPC++
		}
	case clientStreamRPC, serverStreamRPC:
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(true, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
		}
		if ed.cc.callType == clientStreamRPC {
			ret = append(ret, ed.newHalfCloseEntry(true, globalRPCID, idInRPC))
			idInRPC++
		}
		ret = append(ret, ed.newServerHeaderEntry(true, globalRPCID, idInRPC))
		idInRPC++
		if ed.cc.success {
			for i := 0; i < len(ed.responses); i++ {
				ret = append(ret, ed.newServerMessageEntry(true, globalRPCID, idInRPC, ed.responses[i]))
				idInRPC++
			}
		}
	}

	if ed.cc.callType == cancelRPC {
		ret = append(ret, ed.newCancelEntry(globalRPCID, idInRPC))
		idInRPC++
	} else {
		ret = append(ret, ed.newServerTrailerEntry(true, globalRPCID, idInRPC, ed.err))
		idInRPC++
	}
	return ret
}

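// toServerLogEntries is the server-side counterpart of toClientLogEntries;
// the entry order differs because the server sends its header before reading
// any messages on client-streaming and full-duplex calls.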
func (ed *expectedData) toServerLogEntries() []*pb.GrpcLogEntry {
	var (
		ret     []*pb.GrpcLogEntry
		idInRPC uint64 = 1
	)
	ret = append(ret, ed.newClientHeaderEntry(false, globalRPCID, idInRPC))
	idInRPC++

	switch ed.cc.callType {
	case unaryRPC:
		ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[0]))
		idInRPC++
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		if ed.cc.success {
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0]))
			idInRPC++
		}
	case fullDuplexStreamRPC:
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
			if !ed.cc.success {
				// There is no response in the RPC error case.
				continue
			}
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[i]))
			idInRPC++
		}

		if ed.cc.success && ed.cc.callType == fullDuplexStreamRPC {
			ret = append(ret, ed.newHalfCloseEntry(false, globalRPCID, idInRPC))
			idInRPC++
		}
	case clientStreamRPC:
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
		}
		if ed.cc.success {
			ret = append(ret, ed.newHalfCloseEntry(false, globalRPCID, idInRPC))
			idInRPC++
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0]))
			idInRPC++
		}
	case serverStreamRPC:
		ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[0]))
		idInRPC++
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		for i := 0; i < len(ed.responses); i++ {
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[i]))
			idInRPC++
		}
	}

	ret = append(ret, ed.newServerTrailerEntry(false, globalRPCID, idInRPC, ed.err))
	idInRPC++

	return ret
}

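// runRPCs starts a test server, performs one RPC of the configured type,
// shuts the connection and server down so all entries get flushed, and
// returns the data needed to build the expected log entries.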
func runRPCs(t *testing.T, cc *rpcConfig) *expectedData {
	te := newTest(t)
	te.startServer(&testServer{te: te})
	defer te.tearDown()

	expect := &expectedData{
		te: te,
		cc: cc,
	}

	switch cc.callType {
	case unaryRPC:
		expect.method = "/grpc.testing.TestService/UnaryCall"
		req, resp, err := te.doUnaryCall(cc)
		expect.requests = []proto.Message{req}
		expect.responses = []proto.Message{resp}
		expect.err = err
	case clientStreamRPC:
		expect.method = "/grpc.testing.TestService/StreamingInputCall"
		reqs, resp, err := te.doClientStreamCall(cc)
		expect.requests = reqs
		expect.responses = []proto.Message{resp}
		expect.err = err
	case serverStreamRPC:
		expect.method = "/grpc.testing.TestService/StreamingOutputCall"
		req, resps, err := te.doServerStreamCall(cc)
		expect.responses = resps
		expect.requests = []proto.Message{req}
		expect.err = err
	case fullDuplexStreamRPC, cancelRPC:
		expect.method = "/grpc.testing.TestService/FullDuplexCall"
		expect.requests, expect.responses, expect.err = te.doFullDuplexCallRoundtrip(cc)
	}
	if cc.success != (expect.err == nil) {
		t.Fatalf("cc.success: %v, got error: %v", cc.success, expect.err)
	}
	te.cc.Close()
	te.srv.GracefulStop() // Wait for the server to stop.

	return expect
}

// equalLogEntry reports whether all the given entries are equal, after
// clearing fields we don't compare (timestamp, call ID) and sorting metadata
// entries by key. Note that it mutates its arguments.
//
// It is typically called with only two entries. It's written over a variadic
// slice so the normalization code can live in one loop instead of being
// copied for each argument.
func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) {
	for i, e := range entries {
		// Clear out some fields we don't compare.
		e.Timestamp = nil
		e.CallId = 0 // CallID is global to the binary, hard to compare.
		if h := e.GetClientHeader(); h != nil {
			h.Timeout = nil
			tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...)
			h.Metadata.Entry = tmp
			sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key })
		}
		if h := e.GetServerHeader(); h != nil {
			tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...)
			h.Metadata.Entry = tmp
			sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key })
		}
		if h := e.GetTrailer(); h != nil {
			sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key })
		}

		if i > 0 && !proto.Equal(e, entries[i-1]) {
			return false
		}
	}
	return true
}

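// testClientBinaryLog runs one RPC as described by c and compares the
// client-side entries captured by testSink against the expected sequence.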
func testClientBinaryLog(t *testing.T, c *rpcConfig) error {
	defer testSink.clear()
	expect := runRPCs(t, c)
	want := expect.toClientLogEntries()
	var got []*pb.GrpcLogEntry
	// In racy cases, some entries are not yet logged when the RPC finishes
	// (e.g. on cancellation).
	//
	// Check up to 10 times, sleeping 100ms between checks, for a total wait
	// of up to 1 second.
	for i := 0; i < 10; i++ {
		got = testSink.logEntries(true) // all client entries.
		if len(want) == len(got) {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if len(want) != len(got) {
		for i, e := range want {
			t.Errorf("in want: %d, %s", i, e.GetType())
		}
		for i, e := range got {
			t.Errorf("in got: %d, %s", i, e.GetType())
		}
		return fmt.Errorf("didn't get the same number of log entries, want: %d, got: %d", len(want), len(got))
	}
	var errored bool
	for i := 0; i < len(got); i++ {
		if !equalLogEntry(want[i], got[i]) {
			t.Errorf("entry: %d, want %+v, got %+v", i, want[i], got[i])
			errored = true
		}
	}
	if errored {
		return fmt.Errorf("test failed")
	}
	return nil
}

func (s) TestClientBinaryLogUnaryRPC(t *testing.T) {
	if err := testClientBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogUnaryRPCError(t *testing.T) {
	if err := testClientBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogClientStreamRPC(t *testing.T) {
	count := 5
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogClientStreamRPCError(t *testing.T) {
	count := 1
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogServerStreamRPC(t *testing.T) {
	count := 5
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogServerStreamRPCError(t *testing.T) {
	count := 5
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogFullDuplexRPC(t *testing.T) {
	count := 5
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogFullDuplexRPCError(t *testing.T) {
	count := 5
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestClientBinaryLogCancel(t *testing.T) {
	count := 5
	if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: cancelRPC}); err != nil {
		t.Fatal(err)
	}
}

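// testServerBinaryLog mirrors testClientBinaryLog for the server-side entries
// captured by testSink.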
func testServerBinaryLog(t *testing.T, c *rpcConfig) error {
	defer testSink.clear()
	expect := runRPCs(t, c)
	want := expect.toServerLogEntries()
	var got []*pb.GrpcLogEntry
	// In racy cases, some entries are not yet logged when the RPC finishes
	// (e.g. on cancellation). This is unlikely to happen on the server side,
	// but it does no harm to retry.
	//
	// Check up to 10 times, sleeping 100ms between checks, for a total wait
	// of up to 1 second.
	for i := 0; i < 10; i++ {
		got = testSink.logEntries(false) // all server entries.
		if len(want) == len(got) {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}

	if len(want) != len(got) {
		for i, e := range want {
			t.Errorf("in want: %d, %s", i, e.GetType())
		}
		for i, e := range got {
			t.Errorf("in got: %d, %s", i, e.GetType())
		}
		return fmt.Errorf("didn't get the same number of log entries, want: %d, got: %d", len(want), len(got))
	}
	var errored bool
	for i := 0; i < len(got); i++ {
		if !equalLogEntry(want[i], got[i]) {
			t.Errorf("entry: %d, want %+v, got %+v", i, want[i], got[i])
			errored = true
		}
	}
	if errored {
		return fmt.Errorf("test failed")
	}
	return nil
}

func (s) TestServerBinaryLogUnaryRPC(t *testing.T) {
	if err := testServerBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogUnaryRPCError(t *testing.T) {
	if err := testServerBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogClientStreamRPC(t *testing.T) {
	count := 5
	if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogClientStreamRPCError(t *testing.T) {
	count := 1
	if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogServerStreamRPC(t *testing.T) {
	count := 5
	if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogServerStreamRPCError(t *testing.T) {
	count := 5
	if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogFullDuplex(t *testing.T) {
	count := 5
	if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestServerBinaryLogFullDuplexError(t *testing.T) {
	count := 5
	if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil {
		t.Fatal(err)
	}
}