github.com/stackdocker/rkt@v0.10.1-0.20151109095037-1aa827478248/Godeps/_workspace/src/google.golang.org/grpc/test/end2end_test.go

/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package grpc_test

import (
	"fmt"
	"io"
	"math"
	"net"
	"reflect"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/coreos/rkt/Godeps/_workspace/src/github.com/golang/protobuf/proto"
	"github.com/coreos/rkt/Godeps/_workspace/src/golang.org/x/net/context"
	"github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc"
	"github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/codes"
	"github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/credentials"
	"github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/grpclog"
	"github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/health"
	healthpb "github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/health/grpc_health_v1alpha"
	"github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/metadata"
	testpb "github.com/coreos/rkt/Godeps/_workspace/src/google.golang.org/grpc/test/grpc_testing"
)

var (
	testMetadata = metadata.MD{
		"key1": []string{"value1"},
		"key2": []string{"value2"},
	}
	testAppUA = "myApp1/1.0 myApp2/0.9"
)

type testServer struct {
	security string // indicates the authentication protocol used by this server.
}

func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	if md, ok := metadata.FromContext(ctx); ok {
		// For testing purposes, return an error if there is attached metadata
		// other than the user agent set by the client application.
		if _, ok := md["user-agent"]; !ok {
			return nil, grpc.Errorf(codes.DataLoss, "got extra metadata")
		}
		var str []string
		for _, entry := range md["user-agent"] {
			str = append(str, "ua", entry)
		}
		grpc.SendHeader(ctx, metadata.Pairs(str...))
	}
	return new(testpb.Empty), nil
}

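// newPayload builds a test payload of the requested type and size. Only
// COMPRESSABLE payloads are supported; any other type aborts the test binary
// via grpclog.Fatalf.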
func newPayload(t testpb.PayloadType, size int32) *testpb.Payload {
	if size < 0 {
		grpclog.Fatalf("Requested a response with invalid length %d", size)
	}
	body := make([]byte, size)
	switch t {
	case testpb.PayloadType_COMPRESSABLE:
	case testpb.PayloadType_UNCOMPRESSABLE:
		grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
	default:
		grpclog.Fatalf("Unsupported payload type: %d", t)
	}
	return &testpb.Payload{
		Type: t.Enum(),
		Body: body,
	}
}

func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromContext(ctx)
	if ok {
		if err := grpc.SendHeader(ctx, md); err != nil {
			grpclog.Fatalf("grpc.SendHeader(%v, %v) = %v, want %v", ctx, md, err, nil)
		}
		grpc.SetTrailer(ctx, md)
	}
	if s.security != "" {
		// Check the authentication info.
		authInfo, ok := credentials.FromContext(ctx)
		if !ok {
			grpclog.Fatalf("Failed to get AuthInfo from ctx.")
		}
		var authType string
		switch info := authInfo.(type) {
		case credentials.TLSInfo:
			authType = info.AuthType()
		default:
			grpclog.Fatalf("Unknown AuthInfo type")
		}
		if authType != s.security {
			grpclog.Fatalf("Wrong auth type: got %q, want %q", authType, s.security)
		}
	}

	// Simulate some service delay.
	time.Sleep(time.Second)
	return &testpb.SimpleResponse{
		Payload: newPayload(in.GetResponseType(), in.GetResponseSize()),
	}, nil
}

func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error {
	if md, ok := metadata.FromContext(stream.Context()); ok {
		// For testing purposes, return an error if there is attached metadata.
		if len(md) > 0 {
			return grpc.Errorf(codes.DataLoss, "got extra metadata")
		}
	}
	cs := args.GetResponseParameters()
	for _, c := range cs {
		if us := c.GetIntervalUs(); us > 0 {
			time.Sleep(time.Duration(us) * time.Microsecond)
		}
		if err := stream.Send(&testpb.StreamingOutputCallResponse{
			Payload: newPayload(args.GetResponseType(), c.GetSize()),
		}); err != nil {
			return err
		}
	}
	return nil
}

func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error {
	var sum int
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&testpb.StreamingInputCallResponse{
				AggregatedPayloadSize: proto.Int32(int32(sum)),
			})
		}
		if err != nil {
			return err
		}
		p := in.GetPayload().GetBody()
		sum += len(p)
	}
}

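// FullDuplexCall echoes any metadata attached by the client back as both the
// header and the trailer, then answers each incoming request according to its
// response parameters.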
func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			grpclog.Fatalf("%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
		}
		stream.SetTrailer(md)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			return err
		}
		cs := in.GetResponseParameters()
		for _, c := range cs {
			if us := c.GetIntervalUs(); us > 0 {
				time.Sleep(time.Duration(us) * time.Microsecond)
			}
			if err := stream.Send(&testpb.StreamingOutputCallResponse{
				Payload: newPayload(in.GetResponseType(), c.GetSize()),
			}); err != nil {
				return err
			}
		}
	}
}

func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error {
	var msgBuf []*testpb.StreamingOutputCallRequest
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			break
		}
		if err != nil {
			return err
		}
		msgBuf = append(msgBuf, in)
	}
	for _, m := range msgBuf {
		cs := m.GetResponseParameters()
		for _, c := range cs {
			if us := c.GetIntervalUs(); us > 0 {
				time.Sleep(time.Duration(us) * time.Microsecond)
			}
			if err := stream.Send(&testpb.StreamingOutputCallResponse{
				Payload: newPayload(m.GetResponseType(), c.GetSize()),
			}); err != nil {
				return err
			}
		}
	}
	return nil
}

const tlsDir = "testdata/"

func TestDialTimeout(t *testing.T) {
	conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTimeout(time.Millisecond), grpc.WithBlock(), grpc.WithInsecure())
	if err == nil {
		conn.Close()
	}
	if err != grpc.ErrClientConnTimeout {
		t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout)
	}
}

func TestTLSDialTimeout(t *testing.T) {
	creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
	if err != nil {
		t.Fatalf("Failed to create credentials %v", err)
	}
	conn, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTransportCredentials(creds), grpc.WithTimeout(time.Millisecond), grpc.WithBlock())
	if err == nil {
		conn.Close()
	}
	if err != grpc.ErrClientConnTimeout {
		t.Fatalf("grpc.Dial(_, _) = %v, %v, want %v", conn, err, grpc.ErrClientConnTimeout)
	}
}

func TestCredentialsMisuse(t *testing.T) {
	creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
	if err != nil {
		t.Fatalf("Failed to create credentials %v", err)
	}
	// Two conflicting credential configurations.
	if _, err := grpc.Dial("Non-Existent.Server:80", grpc.WithTransportCredentials(creds), grpc.WithTimeout(time.Millisecond), grpc.WithBlock(), grpc.WithInsecure()); err != grpc.ErrCredentialsMisuse {
		t.Fatalf("grpc.Dial(_, _) = _, %v, want _, %v", err, grpc.ErrCredentialsMisuse)
	}
	// Security info on an insecure connection.
	if _, err := grpc.Dial("Non-Existent.Server:80", grpc.WithPerRPCCredentials(creds), grpc.WithTimeout(time.Millisecond), grpc.WithBlock(), grpc.WithInsecure()); err != grpc.ErrCredentialsMisuse {
		t.Fatalf("grpc.Dial(_, _) = _, %v, want _, %v", err, grpc.ErrCredentialsMisuse)
	}
}

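// TestReconnectTimeout verifies that a pending RPC fails, rather than
// blocking forever, once the client gives up reconnecting to a server that
// went away after the initial dial succeeded.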
func TestReconnectTimeout(t *testing.T) {
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatalf("Failed to listen: %v", err)
	}
	_, port, err := net.SplitHostPort(lis.Addr().String())
	if err != nil {
		t.Fatalf("Failed to parse listener address: %v", err)
	}
	addr := "localhost:" + port
	conn, err := grpc.Dial(addr, grpc.WithTimeout(5*time.Second), grpc.WithBlock(), grpc.WithInsecure())
	if err != nil {
		t.Fatalf("Failed to dial to the server %q: %v", addr, err)
	}
	// Close the unaccepted connection (i.e., conn).
	lis.Close()
	tc := testpb.NewTestServiceClient(conn)
	waitC := make(chan struct{})
	go func() {
		defer close(waitC)
		argSize := 271828
		respSize := 314159
		req := &testpb.SimpleRequest{
			ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseSize: proto.Int32(int32(respSize)),
			Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
		}
		// Use t.Errorf here: t.Fatalf must not be called from a goroutine
		// other than the one running the test.
		if _, err := tc.UnaryCall(context.Background(), req); err == nil {
			t.Errorf("TestService/UnaryCall(_, _) = _, <nil>, want _, non-nil")
		}
	}()
	// Block until reconnect times out.
	<-waitC
	if err := conn.Close(); err != grpc.ErrClientConnClosing {
		t.Fatalf("%v.Close() = %v, want %v", conn, err, grpc.ErrClientConnClosing)
	}
}

func unixDialer(addr string, timeout time.Duration) (net.Conn, error) {
	return net.DialTimeout("unix", addr, timeout)
}

type env struct {
	network  string // The type of network, such as tcp, unix, etc.
	dialer   func(addr string, timeout time.Duration) (net.Conn, error)
	security string // The security protocol, such as TLS, SSH, etc.
}

func listTestEnv() []env {
	if runtime.GOOS == "windows" {
		return []env{{"tcp", nil, ""}, {"tcp", nil, "tls"}}
	}
	return []env{{"tcp", nil, ""}, {"tcp", nil, "tls"}, {"unix", unixDialer, ""}, {"unix", unixDialer, "tls"}}
}

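// setUp starts a grpc.Server for the given test environment and returns it
// together with a connected client. If hs is non-nil, the health service is
// registered on the server as well. maxStream caps the number of concurrent
// streams per server transport, and ua, if non-empty, sets the client user
// agent.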
func setUp(hs *health.HealthServer, maxStream uint32, ua string, e env) (s *grpc.Server, cc *grpc.ClientConn) {
	sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(maxStream)}
	la := ":0"
	switch e.network {
	case "unix":
		// Use a unique socket path; %d on a time.Time would format the raw
		// struct, while UnixNano yields the intended numeric suffix.
		la = fmt.Sprintf("/tmp/testsock%d", time.Now().UnixNano())
		syscall.Unlink(la)
	}
	lis, err := net.Listen(e.network, la)
	if err != nil {
		grpclog.Fatalf("Failed to listen: %v", err)
	}
	if e.security == "tls" {
		creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key")
		if err != nil {
			grpclog.Fatalf("Failed to generate credentials %v", err)
		}
		sopts = append(sopts, grpc.Creds(creds))
	}
	s = grpc.NewServer(sopts...)
	if hs != nil {
		healthpb.RegisterHealthCheckServer(s, hs)
	}
	testpb.RegisterTestServiceServer(s, &testServer{security: e.security})
	go s.Serve(lis)
	addr := la
	switch e.network {
	case "unix":
	default:
		_, port, err := net.SplitHostPort(lis.Addr().String())
		if err != nil {
			grpclog.Fatalf("Failed to parse listener address: %v", err)
		}
		addr = "localhost:" + port
	}
	if e.security == "tls" {
		// Use a distinct error variable for the credentials so the dial error
		// assigned to err below is not shadowed inside this block.
		creds, cerr := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com")
		if cerr != nil {
			grpclog.Fatalf("Failed to create credentials %v", cerr)
		}
		cc, err = grpc.Dial(addr, grpc.WithTransportCredentials(creds), grpc.WithDialer(e.dialer), grpc.WithUserAgent(ua))
	} else {
		cc, err = grpc.Dial(addr, grpc.WithDialer(e.dialer), grpc.WithInsecure(), grpc.WithUserAgent(ua))
	}
	if err != nil {
		grpclog.Fatalf("Dial(%q) = %v", addr, err)
	}
	return
}

func tearDown(s *grpc.Server, cc *grpc.ClientConn) {
	cc.Close()
	s.Stop()
}

func TestTimeoutOnDeadServer(t *testing.T) {
	for _, e := range listTestEnv() {
		testTimeoutOnDeadServer(t, e)
	}
}

func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Set -1 as the timeout to make sure that, if the transportMonitor gets
	// the error notification in time, the failure path of the first invocation
	// of ClientConn.wait hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}

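// healthCheck issues a health-check RPC for serviceName over cc, bounded by
// the timeout t.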
func healthCheck(t time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) {
	ctx, _ := context.WithTimeout(context.Background(), t)
	hc := healthpb.NewHealthCheckClient(cc)
	req := &healthpb.HealthCheckRequest{
		Service: serviceName,
	}
	return hc.Check(ctx, req)
}

func TestHealthCheckOnSuccess(t *testing.T) {
	for _, e := range listTestEnv() {
		testHealthCheckOnSuccess(t, e)
	}
}

func testHealthCheckOnSuccess(t *testing.T, e env) {
	hs := health.NewHealthServer()
	hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", healthpb.HealthCheckResponse_SERVING)
	s, cc := setUp(hs, math.MaxUint32, "", e)
	defer tearDown(s, cc)
	if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck"); err != nil {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, <nil>", err)
	}
}

func TestHealthCheckOnFailure(t *testing.T) {
	for _, e := range listTestEnv() {
		testHealthCheckOnFailure(t, e)
	}
}

func testHealthCheckOnFailure(t *testing.T, e env) {
	hs := health.NewHealthServer()
	hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", healthpb.HealthCheckResponse_SERVING)
	s, cc := setUp(hs, math.MaxUint32, "", e)
	defer tearDown(s, cc)
	if _, err := healthCheck(0*time.Second, cc, "grpc.health.v1alpha.HealthCheck"); err != grpc.Errorf(codes.DeadlineExceeded, "context deadline exceeded") {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, error code %d", err, codes.DeadlineExceeded)
	}
}

func TestHealthCheckOff(t *testing.T) {
	for _, e := range listTestEnv() {
		testHealthCheckOff(t, e)
	}
}

func testHealthCheckOff(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	defer tearDown(s, cc)
	if _, err := healthCheck(1*time.Second, cc, ""); err != grpc.Errorf(codes.Unimplemented, "unknown service grpc.health.v1alpha.HealthCheck") {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, error code %d", err, codes.Unimplemented)
	}
}

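// TestHealthCheckServingStatus exercises serving-status transitions: the
// empty service name always reports SERVING, an unregistered service is an
// error, and a registered service reflects the most recently set status.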
func TestHealthCheckServingStatus(t *testing.T) {
	for _, e := range listTestEnv() {
		testHealthCheckServingStatus(t, e)
	}
}

func testHealthCheckServingStatus(t *testing.T, e env) {
	hs := health.NewHealthServer()
	s, cc := setUp(hs, math.MaxUint32, "", e)
	defer tearDown(s, cc)
	out, err := healthCheck(1*time.Second, cc, "")
	if err != nil {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, <nil>", err)
	}
	if out.Status != healthpb.HealthCheckResponse_SERVING {
		t.Fatalf("Got the serving status %v, want SERVING", out.Status)
	}
	if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck"); err != grpc.Errorf(codes.NotFound, "unknown service") {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, error code %d", err, codes.NotFound)
	}
	hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", healthpb.HealthCheckResponse_SERVING)
	out, err = healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck")
	if err != nil {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, <nil>", err)
	}
	if out.Status != healthpb.HealthCheckResponse_SERVING {
		t.Fatalf("Got the serving status %v, want SERVING", out.Status)
	}
	hs.SetServingStatus("grpc.health.v1alpha.HealthCheck", healthpb.HealthCheckResponse_NOT_SERVING)
	out, err = healthCheck(1*time.Second, cc, "grpc.health.v1alpha.HealthCheck")
	if err != nil {
		t.Fatalf("HealthCheck/Check(_, _) = _, %v, want _, <nil>", err)
	}
	if out.Status != healthpb.HealthCheckResponse_NOT_SERVING {
		t.Fatalf("Got the serving status %v, want NOT_SERVING", out.Status)
	}
}

func TestEmptyUnaryWithUserAgent(t *testing.T) {
	for _, e := range listTestEnv() {
		testEmptyUnaryWithUserAgent(t, e)
	}
}

func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, testAppUA, e)
	// Wait until cc is connected.
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(10*time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	tc := testpb.NewTestServiceClient(cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
	tearDown(s, cc)
	if ok := cc.WaitForStateChange(5*time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	if cc.State() != grpc.Shutdown {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Shutdown)
	}
}

func TestFailedEmptyUnary(t *testing.T) {
	for _, e := range listTestEnv() {
		testFailedEmptyUnary(t, e)
	}
}

func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}

func TestLargeUnary(t *testing.T) {
	for _, e := range listTestEnv() {
		testLargeUnary(t, e)
	}
}

func testLargeUnary(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 271828
	respSize := 314159
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}

func TestMetadataUnaryRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testMetadataUnaryRPC(t, e)
	}
}

func testMetadataUnaryRPC(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	var header, trailer metadata.MD
	ctx := metadata.NewContext(context.Background(), testMetadata)
	_, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer))
	if err != nil {
		t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err)
	}
	if !reflect.DeepEqual(testMetadata, header) {
		t.Fatalf("Received header metadata %v, want %v", header, testMetadata)
	}
	if !reflect.DeepEqual(testMetadata, trailer) {
		t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata)
	}
}

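// performOneRPC makes a single unary call and checks the reply. It runs on
// many goroutines concurrently in testRetry, so failures are reported with
// t.Errorf instead of t.Fatalf.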
func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup) {
	defer wg.Done()
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	reply, err := tc.UnaryCall(context.Background(), req)
	if err != nil {
		t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, <nil>", err)
		return
	}
	pt := reply.GetPayload().GetType()
	ps := len(reply.GetPayload().GetBody())
	if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize {
		t.Errorf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize)
	}
}

func TestRetry(t *testing.T) {
	for _, e := range listTestEnv() {
		testRetry(t, e)
	}
}

// This test mimics a user who sends 1000 RPCs concurrently on a faulty transport.
// TODO(zhaoq): Refactor to make this clearer and add more cases to test racy
// and error-prone paths.
func testRetry(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(1 * time.Second)
		// The server shuts down the network connection to make a
		// transport error which will be detected by the client side
		// code.
		s.TestingCloseConns()
		wg.Done()
	}()
	// All these RPCs should succeed eventually.
	for i := 0; i < 1000; i++ {
		time.Sleep(2 * time.Millisecond)
		wg.Add(1)
		go performOneRPC(t, tc, &wg)
	}
	wg.Wait()
}

func TestRPCTimeout(t *testing.T) {
	for _, e := range listTestEnv() {
		testRPCTimeout(t, e)
	}
}

// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism.
func testRPCTimeout(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}

func TestCancel(t *testing.T) {
	for _, e := range listTestEnv() {
		testCancel(t, e)
	}
}

func testCancel(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}

// The following tests the gRPC streaming RPC implementations.
// TODO(zhaoq): Have better coverage on error cases.
var (
	reqSizes  = []int{27182, 8, 1828, 45904}
	respSizes = []int{31415, 9, 2653, 58979}
)

func TestPingPong(t *testing.T) {
	for _, e := range listTestEnv() {
		testPingPong(t, e)
	}
}

func testPingPong(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	var index int
	for index < len(reqSizes) {
		respParam := []*testpb.ResponseParameters{
			{
				Size: proto.Int32(int32(respSizes[index])),
			},
		}
		req := &testpb.StreamingOutputCallRequest{
			ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
			ResponseParameters: respParam,
			Payload:            newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])),
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		reply, err := stream.Recv()
		if err != nil {
			t.Fatalf("%v.Recv() = %v, want <nil>", stream, err)
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
	}
}

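// testMetadataStreamingRPC checks that metadata attached to a full-duplex
// stream comes back in both the header (read concurrently while sending) and
// the trailer.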
func TestMetadataStreamingRPC(t *testing.T) {
	for _, e := range listTestEnv() {
		testMetadataStreamingRPC(t, e)
	}
}

func testMetadataStreamingRPC(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	go func() {
		headerMD, err := stream.Header()
		if e.security == "tls" {
			delete(headerMD, "transport_security_type")
		}
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		// Test the cached value.
		headerMD, err = stream.Header()
		if err != nil || !reflect.DeepEqual(testMetadata, headerMD) {
			t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata)
		}
		var index int
		for index < len(reqSizes) {
			respParam := []*testpb.ResponseParameters{
				{
					Size: proto.Int32(int32(respSizes[index])),
				},
			}
			req := &testpb.StreamingOutputCallRequest{
				ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
				ResponseParameters: respParam,
				Payload:            newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])),
			}
			if err := stream.Send(req); err != nil {
				t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err)
				return
			}
			index++
		}
		// Tell the server we're done sending args.
		stream.CloseSend()
	}()
	for {
		if _, err := stream.Recv(); err != nil {
			break
		}
	}
	trailerMD := stream.Trailer()
	if !reflect.DeepEqual(testMetadata, trailerMD) {
		t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata)
	}
}

func TestServerStreaming(t *testing.T) {
	for _, e := range listTestEnv() {
		testServerStreaming(t, e)
	}
}

func testServerStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	stream, err := tc.StreamingOutputCall(context.Background(), req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	var rpcStatus error
	var respCnt int
	var index int
	for {
		reply, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		pt := reply.GetPayload().GetType()
		if pt != testpb.PayloadType_COMPRESSABLE {
			t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE)
		}
		size := len(reply.GetPayload().GetBody())
		if size != int(respSizes[index]) {
			t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
		}
		index++
		respCnt++
	}
	if rpcStatus != io.EOF {
		t.Fatalf("Failed to finish the server streaming rpc: %v, want <EOF>", rpcStatus)
	}
	if respCnt != len(respSizes) {
		t.Fatalf("Got %d replies, want %d", respCnt, len(respSizes))
	}
}

func TestFailedServerStreaming(t *testing.T) {
	for _, e := range listTestEnv() {
		testFailedServerStreaming(t, e)
	}
}

func testFailedServerStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	respParam := make([]*testpb.ResponseParameters, len(respSizes))
	for i, s := range respSizes {
		respParam[i] = &testpb.ResponseParameters{
			Size: proto.Int32(int32(s)),
		}
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
	}
	ctx := metadata.NewContext(context.Background(), testMetadata)
	stream, err := tc.StreamingOutputCall(ctx, req)
	if err != nil {
		t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want <nil>", tc, err)
	}
	if _, err := stream.Recv(); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}

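// testClientStreaming sends one payload per size in reqSizes and verifies
// that the server reports the matching aggregated payload size.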
func TestClientStreaming(t *testing.T) {
	for _, e := range listTestEnv() {
		testClientStreaming(t, e)
	}
}

func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}

func TestExceedMaxStreamsLimit(t *testing.T) {
	for _, e := range listTestEnv() {
		testExceedMaxStreamsLimit(t, e)
	}
}

func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allow 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		// Run the overall 5-second deadline on its own timer; putting it in
		// the select alongside the 5ms tick would restart it every iteration,
		// so it could never fire.
		deadline := time.NewTimer(5 * time.Second)
		defer deadline.Stop()
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-deadline.C:
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, _ := context.WithTimeout(context.Background(), time.Second)
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}