gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/internal/transport/keepalive_test.go

/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// This file contains tests related to the following proposals:
// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
// https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md
// https://github.com/grpc/proposal/blob/master/A18-tcp-user-timeout.md
package transport

import (
	"context"
	"fmt"
	"io"
	"net"
	"testing"
	"time"

	"gitee.com/ks-custle/core-gm/grpc/internal/syscall"
	"gitee.com/ks-custle/core-gm/grpc/keepalive"
	"gitee.com/ks-custle/core-gm/net/http2"
)

const defaultTestTimeout = 10 * time.Second

// TestMaxConnectionIdle tests that a server will send GoAway to an idle
// client. An idle client is one who doesn't make any RPC calls for a duration
// of MaxConnectionIdle time.
func (s) TestMaxConnectionIdle(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionIdle: 2 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	stream, err := client.NewStream(ctx, &CallHdr{})
	if err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
	client.CloseStream(stream, io.EOF)

	// Wait for the server's MaxConnectionIdle timeout to kick in, and for it
	// to send a GoAway.
	timeout := time.NewTimer(time.Second * 4)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason)
		}
	case <-timeout.C:
		t.Fatalf("MaxConnectionIdle timeout expired, expected a GoAway from the server.")
	}
}

// TestMaxConnectionIdleBusyClient tests that a server will not send GoAway to
// a busy client.
func (s) TestMaxConnectionIdleBusyClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionIdle: 2 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	_, err := client.NewStream(ctx, &CallHdr{})
	if err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Wait for double the MaxConnectionIdle time to make sure the server does
	// not send a GoAway, as the client has an open stream.
	timeout := time.NewTimer(time.Second * 4)
	select {
	case <-client.GoAway():
		if !timeout.Stop() {
			<-timeout.C
		}
		t.Fatalf("A non-idle client received a GoAway.")
	case <-timeout.C:
	}
}

// TestMaxConnectionAge tests that a server will send GoAway after a duration
// of MaxConnectionAge.
func (s) TestMaxConnectionAge(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionAge:      1 * time.Second,
			MaxConnectionAgeGrace: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	_, err := client.NewStream(ctx, &CallHdr{})
	if err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Wait for the server's MaxConnectionAge timeout to kick in, and for it
	// to send a GoAway.
	timeout := time.NewTimer(4 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason)
		}
	case <-timeout.C:
		t.Fatalf("MaxConnectionAge timeout expired, expected a GoAway from the server.")
	}
}

const (
	defaultWriteBufSize = 32 * 1024
	defaultReadBufSize  = 32 * 1024
)

// TestKeepaliveServerClosesUnresponsiveClient tests that a server closes
// the connection with a client that doesn't respond to keepalive pings.
//
// This test creates a regular net.Conn connection to the server and sends the
// clientPreface and the initial Settings frame, and then remains unresponsive.
func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			Time:    1 * time.Second,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	addr := server.addr()
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		t.Fatalf("net.Dial(tcp, %v) failed: %v", addr, err)
	}
	defer conn.Close()

	if n, err := conn.Write(clientPreface); err != nil || n != len(clientPreface) {
		t.Fatalf("conn.Write(clientPreface) failed: n=%v, err=%v", n, err)
	}
	framer := newFramer(conn, defaultWriteBufSize, defaultReadBufSize, 0)
	if err := framer.fr.WriteSettings(http2.Setting{}); err != nil {
		t.Fatal("framer.WriteSettings(http2.Setting{}) failed:", err)
	}
	framer.writer.Flush()

	// We read from the net.Conn till we get an error, which is expected when
	// the server closes the connection as part of the keepalive logic.
	errCh := make(chan error, 1)
	go func() {
		b := make([]byte, 24)
		for {
			if _, err = conn.Read(b); err != nil {
				errCh <- err
				return
			}
		}
	}()

	// Server waits for KeepaliveParams.Time seconds before sending out a ping,
	// and then waits for KeepaliveParams.Timeout for a ping ack.
	timeout := time.NewTimer(4 * time.Second)
	select {
	case err := <-errCh:
		if err != io.EOF {
			t.Fatalf("client.Read(_) = _,%v, want io.EOF", err)
		}
	case <-timeout.C:
		t.Fatalf("keepalive timeout expired, server should have closed the connection.")
	}
}

// TestKeepaliveServerWithResponsiveClient tests that a server doesn't close
// the connection with a client that responds to keepalive pings.
func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			Time:    1 * time.Second,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	// Give keepalive logic some time by sleeping.
	time.Sleep(4 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveClientClosesUnresponsiveServer creates a server which does not
// respond to keepalive pings, and makes sure that the client closes the
// transport once the keepalive logic kicks in. Here, we set the
// `PermitWithoutStream` parameter to true which ensures that the keepalive
// logic is running even without any active streams.
func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) {
	connCh := make(chan net.Conn, 1)
	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
		Time:                1 * time.Second,
		Timeout:             1 * time.Second,
		PermitWithoutStream: true,
	}}, connCh)
	defer cancel()
	defer client.Close(fmt.Errorf("closed manually by test"))

	conn, ok := <-connCh
	if !ok {
		t.Fatalf("Server didn't return connection object")
	}
	defer conn.Close()

	// Sleep for keepalive to close the connection.
	time.Sleep(4 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveClientOpenWithUnresponsiveServer creates a server which does
// not respond to keepalive pings, and makes sure that the client does not
// close the transport. Here, we do not set the `PermitWithoutStream` parameter
// to true which ensures that the keepalive logic is turned off without any
// active streams, and therefore the transport stays open.
func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) {
	connCh := make(chan net.Conn, 1)
	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
		Time:    1 * time.Second,
		Timeout: 1 * time.Second,
	}}, connCh)
	defer cancel()
	defer client.Close(fmt.Errorf("closed manually by test"))

	conn, ok := <-connCh
	if !ok {
		t.Fatalf("Server didn't return connection object")
	}
	defer conn.Close()

	// Give keepalive some time.
	time.Sleep(4 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveClientClosesWithActiveStreams creates a server which does not
// respond to keepalive pings, and makes sure that the client closes the
// transport even when there is an active stream.
func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
	connCh := make(chan net.Conn, 1)
	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
		Time:    1 * time.Second,
		Timeout: 1 * time.Second,
	}}, connCh)
	defer cancel()
	defer client.Close(fmt.Errorf("closed manually by test"))

	conn, ok := <-connCh
	if !ok {
		t.Fatalf("Server didn't return connection object")
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Create a stream, but send no data on it.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Give keepalive some time.
	time.Sleep(4 * time.Second)

	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveClientStaysHealthyWithResponsiveServer creates a server which
// responds to keepalive pings, and makes sure that a client transport stays
// healthy without any active streams.
func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
	server, client, cancel := setUpWithOptions(t, 0,
		&ServerConfig{
			KeepalivePolicy: keepalive.EnforcementPolicy{
				PermitWithoutStream: true,
			},
		},
		normal,
		ConnectOptions{
			KeepaliveParams: keepalive.ClientParameters{
				Time:                1 * time.Second,
				Timeout:             1 * time.Second,
				PermitWithoutStream: true,
			}})
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	// Give keepalive some time.
	time.Sleep(4 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveClientFrequency creates a server which expects at most 1 client
// ping for every 1.2 seconds, while the client is configured to send a ping
// every 1 second. So, this configuration should end up with the client
// transport being closed. But we had a bug wherein the client was sending one
// ping every [Time+Timeout] instead of every [Time] period, and this test
// explicitly makes sure the fix works and the client sends a ping every [Time]
// period.
func (s) TestKeepaliveClientFrequency(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime:             1200 * time.Millisecond, // 1.2 seconds
			PermitWithoutStream: true,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:                1 * time.Second,
			Timeout:             2 * time.Second,
			PermitWithoutStream: true,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	timeout := time.NewTimer(6 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
		}
	case <-timeout.C:
		t.Fatalf("client transport still healthy; expected GoAway from the server.")
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveServerEnforcementWithAbusiveClientNoRPC verifies that the
// server closes a client transport when it sends too many keepalive pings
// (when there are no active streams), based on the configured
// EnforcementPolicy.
func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime: 2 * time.Second,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:                50 * time.Millisecond,
			Timeout:             1 * time.Second,
			PermitWithoutStream: true,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	timeout := time.NewTimer(4 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
		}
	case <-timeout.C:
		t.Fatalf("client transport still healthy; expected GoAway from the server.")
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveServerEnforcementWithAbusiveClientWithRPC verifies that the
// server closes a client transport when it sends too many keepalive pings
// (even when there is an active stream), based on the configured
// EnforcementPolicy.
func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime: 2 * time.Second,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:    50 * time.Millisecond,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions)
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	timeout := time.NewTimer(4 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
		}
	case <-timeout.C:
		t.Fatalf("client transport still healthy; expected GoAway from the server.")
	}

	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveServerEnforcementWithObeyingClientNoRPC verifies that the
// server does not close a client transport (with no active streams) which
// sends keepalive pings in accordance with the configured keepalive
// EnforcementPolicy.
func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime:             100 * time.Millisecond,
			PermitWithoutStream: true,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:                101 * time.Millisecond,
			Timeout:             1 * time.Second,
			PermitWithoutStream: true,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	// Give keepalive enough time.
	time.Sleep(3 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveServerEnforcementWithObeyingClientWithRPC verifies that the
// server does not close a client transport (with active streams) which
// sends keepalive pings in accordance with the configured keepalive
// EnforcementPolicy.
func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime: 100 * time.Millisecond,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:    101 * time.Millisecond,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions)
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Give keepalive enough time.
	time.Sleep(3 * time.Second)

	// Make sure the client transport is healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient verifies that the
// server does not close a client transport which has been configured to send
// more pings than allowed by the server's EnforcementPolicy. This client
// transport does not have any active streams and `PermitWithoutStream` is set
// to false. This should ensure that the keepalive functionality on the client
// side enters a dormant state.
func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime: 2 * time.Second,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:    50 * time.Millisecond,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
	defer func() {
		client.Close(fmt.Errorf("closed manually by test"))
		server.stop()
		cancel()
	}()

	// No active streams on the client. Give keepalive enough time.
	time.Sleep(5 * time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// Make sure the client transport is healthy.
	if _, err := client.NewStream(ctx, &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestTCPUserTimeout tests that the TCP_USER_TIMEOUT socket option is set to
// the keepalive timeout, as detailed in proposal A18.
func (s) TestTCPUserTimeout(t *testing.T) {
	tests := []struct {
		time        time.Duration
		timeout     time.Duration
		wantTimeout time.Duration
	}{
		{
			10 * time.Second,
			10 * time.Second,
			10 * 1000 * time.Millisecond,
		},
		{
			0,
			0,
			0,
		},
	}
	for _, tt := range tests {
		server, client, cancel := setUpWithOptions(
			t,
			0,
			&ServerConfig{
				KeepaliveParams: keepalive.ServerParameters{
					Time:    tt.timeout,
					Timeout: tt.timeout,
				},
			},
			normal,
			ConnectOptions{
				KeepaliveParams: keepalive.ClientParameters{
					Time:    tt.time,
					Timeout: tt.timeout,
				},
			},
		)
		defer func() {
			client.Close(fmt.Errorf("closed manually by test"))
			server.stop()
			cancel()
		}()

		ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
		defer cancel()
		stream, err := client.NewStream(ctx, &CallHdr{})
		if err != nil {
			t.Fatalf("client.NewStream() failed: %v", err)
		}
		client.CloseStream(stream, io.EOF)

		opt, err := syscall.GetTCPUserTimeout(client.conn)
		if err != nil {
			t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err)
		}
		if opt < 0 {
			t.Skipf("skipping test on unsupported environment")
		}
		if gotTimeout := time.Duration(opt) * time.Millisecond; gotTimeout != tt.wantTimeout {
			t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.wantTimeout)
		}
	}
}