github.com/zoomfoo/nomad@v0.8.5-0.20180907175415-f28fd3a1a056/nomad/client_fs_endpoint_test.go

package nomad

import (
	"fmt"
	"io"
	"net"
	"strings"
	"testing"
	"time"

	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/client"
	"github.com/hashicorp/nomad/client/config"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	codec "github.com/ugorji/go/codec"
)

func TestClientFS_List_Local(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	codec := rpcClient(t, s)
	testutil.WaitForLeader(t, s.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for": "2s",
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state := s.State()
	require.Nil(state.UpsertJob(999, a.Job))
	require.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Make the request without an alloc ID
	req := &cstructs.FsListRequest{
		Path:         "/",
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Fetch the response
	var resp cstructs.FsListResponse
	err := msgpackrpc.CallWithCodec(codec, "FileSystem.List", req, &resp)
	require.NotNil(err)
	require.Contains(err.Error(), "missing")

	// Fetch the response setting the alloc id
	req.AllocID = a.ID
	var resp2 cstructs.FsListResponse
	err = msgpackrpc.CallWithCodec(codec, "FileSystem.List", req, &resp2)
	require.Nil(err)
	require.NotEmpty(resp2.Files)
}

func TestClientFS_List_ACL(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server
	s, root := TestACLServer(t, nil)
	defer s.Shutdown()
	codec := rpcClient(t, s)
	testutil.WaitForLeader(t, s.RPC)

	// Create a bad token
	policyBad := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityDeny})
	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadFS})
	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

	cases := []struct {
		Name          string
		Token         string
		ExpectedError string
	}{
		{
			Name:          "bad token",
			Token:         tokenBad.SecretID,
			ExpectedError: structs.ErrPermissionDenied.Error(),
		},
		{
			Name:          "good token",
			Token:         tokenGood.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
		{
			Name:          "root token",
			Token:         root.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {

			// Make the request
			req := &cstructs.FsListRequest{
				AllocID: uuid.Generate(),
				Path:    "/",
				QueryOptions: structs.QueryOptions{
					Region:    "global",
					Namespace: structs.DefaultNamespace,
					AuthToken: c.Token,
				},
			}

			// Fetch the response
			var resp cstructs.FsListResponse
			err := msgpackrpc.CallWithCodec(codec, "FileSystem.List", req, &resp)
			require.NotNil(err)
			require.Contains(err.Error(), c.ExpectedError)
		})
	}
}

func TestClientFS_List_Remote(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)
	codec := rpcClient(t, s2)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s2.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for": "2s",
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state1 := s1.State()
	state2 := s2.State()
	require.Nil(state1.UpsertJob(999, a.Job))
	require.Nil(state1.UpsertAllocs(1003, []*structs.Allocation{a}))
	require.Nil(state2.UpsertJob(999, a.Job))
	require.Nil(state2.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state2.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Force remove the connection locally in case it exists
	s1.nodeConnsLock.Lock()
	delete(s1.nodeConns, c.NodeID())
	s1.nodeConnsLock.Unlock()

	// Make the request
	req := &cstructs.FsListRequest{
		AllocID:      a.ID,
		Path:         "/",
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Fetch the response
	var resp cstructs.FsListResponse
	err := msgpackrpc.CallWithCodec(codec, "FileSystem.List", req, &resp)
	require.Nil(err)
	require.NotEmpty(resp.Files)
}

func TestClientFS_Stat_OldNode(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server
	s := TestServer(t, nil)
	defer s.Shutdown()
	state := s.State()
	codec := rpcClient(t, s)
	testutil.WaitForLeader(t, s.RPC)

	// Test for an old version error
	node := mock.Node()
	node.Attributes["nomad.version"] = "0.7.1"
	require.Nil(state.UpsertNode(1005, node))

	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	require.Nil(state.UpsertAllocs(1006, []*structs.Allocation{alloc}))

	req := &cstructs.FsStatRequest{
		AllocID:      alloc.ID,
		Path:         "/",
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	var resp cstructs.FsStatResponse
	err := msgpackrpc.CallWithCodec(codec, "FileSystem.Stat", req, &resp)
	require.True(structs.IsErrNodeLacksRpc(err), err.Error())
}

func TestClientFS_Stat_Local(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	codec := rpcClient(t, s)
	testutil.WaitForLeader(t, s.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for": "2s",
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state := s.State()
	require.Nil(state.UpsertJob(999, a.Job))
	require.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Make the request without an alloc ID
	req := &cstructs.FsStatRequest{
		Path:         "/",
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Fetch the response
	var resp cstructs.FsStatResponse
	err := msgpackrpc.CallWithCodec(codec, "FileSystem.Stat", req, &resp)
	require.NotNil(err)
	require.Contains(err.Error(), "missing")

	// Fetch the response setting the alloc id
	req.AllocID = a.ID
	var resp2 cstructs.FsStatResponse
	err = msgpackrpc.CallWithCodec(codec, "FileSystem.Stat", req, &resp2)
	require.Nil(err)
	require.NotNil(resp2.Info)
}

func TestClientFS_Stat_ACL(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server
	s, root := TestACLServer(t, nil)
	defer s.Shutdown()
	codec := rpcClient(t, s)
	testutil.WaitForLeader(t, s.RPC)

	// Create a bad token
	policyBad := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityDeny})
	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadFS})
	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

	cases := []struct {
		Name          string
		Token         string
		ExpectedError string
	}{
		{
			Name:          "bad token",
			Token:         tokenBad.SecretID,
			ExpectedError: structs.ErrPermissionDenied.Error(),
		},
		{
			Name:          "good token",
			Token:         tokenGood.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
		{
			Name:          "root token",
			Token:         root.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {

			// Make the request
			req := &cstructs.FsStatRequest{
				AllocID: uuid.Generate(),
				Path:    "/",
				QueryOptions: structs.QueryOptions{
					Region:    "global",
					Namespace: structs.DefaultNamespace,
					AuthToken: c.Token,
				},
			}

			// Fetch the response
			var resp cstructs.FsStatResponse
			err := msgpackrpc.CallWithCodec(codec, "FileSystem.Stat", req, &resp)
			require.NotNil(err)
			require.Contains(err.Error(), c.ExpectedError)
		})
	}
}

func TestClientFS_Stat_Remote(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)
	codec := rpcClient(t, s2)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s2.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for": "2s",
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state1 := s1.State()
	state2 := s2.State()
	require.Nil(state1.UpsertJob(999, a.Job))
	require.Nil(state1.UpsertAllocs(1003, []*structs.Allocation{a}))
	require.Nil(state2.UpsertJob(999, a.Job))
	require.Nil(state2.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state2.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Force remove the connection locally in case it exists
	s1.nodeConnsLock.Lock()
	delete(s1.nodeConns, c.NodeID())
	s1.nodeConnsLock.Unlock()

	// Make the request
	req := &cstructs.FsStatRequest{
		AllocID:      a.ID,
		Path:         "/",
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Fetch the response
	var resp cstructs.FsStatResponse
	err := msgpackrpc.CallWithCodec(codec, "FileSystem.Stat", req, &resp)
	require.Nil(err)
	require.NotNil(resp.Info)
}

func TestClientFS_Streaming_NoAlloc(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	// Make the request with bad allocation id
	req := &cstructs.FsStreamRequest{
		AllocID:      uuid.Generate(),
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Stream")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(5 * time.Second)

OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error == nil {
				continue
			}

			if structs.IsErrUnknownAllocation(msg.Error) {
				break OUTER
			}
		}
	}
}

func TestClientFS_Streaming_ACL(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server
	s, root := TestACLServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	// Create a bad token
	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
		[]string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS})
	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

	cases := []struct {
		Name          string
		Token         string
		ExpectedError string
	}{
		{
			Name:          "bad token",
			Token:         tokenBad.SecretID,
			ExpectedError: structs.ErrPermissionDenied.Error(),
		},
		{
			Name:          "good token",
			Token:         tokenGood.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
		{
			Name:          "root token",
			Token:         root.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			// Make the request with bad allocation id
			req := &cstructs.FsStreamRequest{
				AllocID: uuid.Generate(),
				QueryOptions: structs.QueryOptions{
					Namespace: structs.DefaultNamespace,
					Region:    "global",
					AuthToken: c.Token,
				},
			}

			// Get the handler
			handler, err := s.StreamingRpcHandler("FileSystem.Stream")
			require.Nil(err)

			// Create a pipe
			p1, p2 := net.Pipe()
			defer p1.Close()
			defer p2.Close()

			errCh := make(chan error)
			streamMsg := make(chan *cstructs.StreamErrWrapper)

			// Start the handler
			go handler(p2)

			// Start the decoder
			go func() {
				decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
				for {
					var msg cstructs.StreamErrWrapper
					if err := decoder.Decode(&msg); err != nil {
						if err == io.EOF || strings.Contains(err.Error(), "closed") {
							return
						}
						errCh <- fmt.Errorf("error decoding: %v", err)
					}

					streamMsg <- &msg
				}
			}()

			// Send the request
			encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
			require.Nil(encoder.Encode(req))

			timeout := time.After(5 * time.Second)

		OUTER:
			for {
				select {
				case <-timeout:
					t.Fatal("timeout")
				case err := <-errCh:
					t.Fatal(err)
				case msg := <-streamMsg:
					if msg.Error == nil {
						continue
					}

					if strings.Contains(msg.Error.Error(), c.ExpectedError) {
						break OUTER
					} else {
						t.Fatalf("Bad error: %v", msg.Error)
					}
				}
			}
		})
	}
}

func TestClientFS_Streaming_Local(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expected := "Hello from the other side"
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":       "2s",
			"stdout_string": expected,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state := s.State()
	require.Nil(state.UpsertJob(999, a.Job))
	require.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Make the request
	req := &cstructs.FsStreamRequest{
		AllocID:      a.ID,
		Path:         "alloc/logs/web.stdout.0",
		Origin:       "start",
		PlainText:    true,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Stream")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(3 * time.Second)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Streaming_Local_Follow(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expectedBase := "Hello from the other side"
	repeat := 10

	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":                "20s",
			"stdout_string":          expectedBase,
			"stdout_repeat":          repeat,
			"stdout_repeat_duration": 200 * time.Millisecond,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state := s.State()
	require.Nil(state.UpsertJob(999, a.Job))
	require.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not running: %v", c.NodeID(), err)
	})

	// Make the request
	req := &cstructs.FsStreamRequest{
		AllocID:      a.ID,
		Path:         "alloc/logs/web.stdout.0",
		Origin:       "start",
		PlainText:    true,
		Follow:       true,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Stream")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(20 * time.Second)
	expected := strings.Repeat(expectedBase, repeat+1)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Streaming_Remote_Server(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s2.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expected := "Hello from the other side"
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":       "2s",
			"stdout_string": expected,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state1 := s1.State()
	state2 := s2.State()
	require.Nil(state1.UpsertJob(999, a.Job))
	require.Nil(state1.UpsertAllocs(1003, []*structs.Allocation{a}))
	require.Nil(state2.UpsertJob(999, a.Job))
	require.Nil(state2.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state2.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Force remove the connection locally in case it exists
	s1.nodeConnsLock.Lock()
	delete(s1.nodeConns, c.NodeID())
	s1.nodeConnsLock.Unlock()

	// Make the request
	req := &cstructs.FsStreamRequest{
		AllocID:      a.ID,
		Path:         "alloc/logs/web.stdout.0",
		Origin:       "start",
		PlainText:    true,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s1.StreamingRpcHandler("FileSystem.Stream")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(3 * time.Second)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Streaming_Remote_Region(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.Region = "two"
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s2.config.RPCAddr.String()}
		c.Region = "two"
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expected := "Hello from the other side"
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":       "2s",
			"stdout_string": expected,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state2 := s2.State()
	require.Nil(state2.UpsertJob(999, a.Job))
	require.Nil(state2.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state2.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Force remove the connection locally in case it exists
	s1.nodeConnsLock.Lock()
	delete(s1.nodeConns, c.NodeID())
	s1.nodeConnsLock.Unlock()

	// Make the request
	req := &cstructs.FsStreamRequest{
		AllocID:      a.ID,
		Path:         "alloc/logs/web.stdout.0",
		Origin:       "start",
		PlainText:    true,
		QueryOptions: structs.QueryOptions{Region: "two"},
	}

	// Get the handler
	handler, err := s1.StreamingRpcHandler("FileSystem.Stream")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(3 * time.Second)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Logs_NoAlloc(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	// Make the request with bad allocation id
	req := &cstructs.FsLogsRequest{
		AllocID:      uuid.Generate(),
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Logs")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(5 * time.Second)

OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error == nil {
				continue
			}

			if structs.IsErrUnknownAllocation(msg.Error) {
				break OUTER
			}
		}
	}
}

func TestClientFS_Logs_OldNode(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server
	s := TestServer(t, nil)
	defer s.Shutdown()
	state := s.State()
	testutil.WaitForLeader(t, s.RPC)

	// Test for an old version error
	node := mock.Node()
	node.Attributes["nomad.version"] = "0.7.1"
	require.Nil(state.UpsertNode(1005, node))

	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	require.Nil(state.UpsertAllocs(1006, []*structs.Allocation{alloc}))

	req := &cstructs.FsLogsRequest{
		AllocID:      alloc.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Logs")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(5 * time.Second)

OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error == nil {
				continue
			}

			if structs.IsErrNodeLacksRpc(msg.Error) {
				break OUTER
			}
		}
	}
}

func TestClientFS_Logs_ACL(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server
	s, root := TestACLServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	// Create a bad token
	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
		[]string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS})
	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

	cases := []struct {
		Name          string
		Token         string
		ExpectedError string
	}{
		{
			Name:          "bad token",
			Token:         tokenBad.SecretID,
			ExpectedError: structs.ErrPermissionDenied.Error(),
		},
		{
			Name:          "good token",
			Token:         tokenGood.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
		{
			Name:          "root token",
			Token:         root.SecretID,
			ExpectedError: structs.ErrUnknownAllocationPrefix,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			// Make the request with bad allocation id
			req := &cstructs.FsLogsRequest{
				AllocID: uuid.Generate(),
				QueryOptions: structs.QueryOptions{
					Namespace: structs.DefaultNamespace,
					Region:    "global",
					AuthToken: c.Token,
				},
			}

			// Get the handler
			handler, err := s.StreamingRpcHandler("FileSystem.Logs")
			require.Nil(err)

			// Create a pipe
			p1, p2 := net.Pipe()
			defer p1.Close()
			defer p2.Close()

			errCh := make(chan error)
			streamMsg := make(chan *cstructs.StreamErrWrapper)

			// Start the handler
			go handler(p2)

			// Start the decoder
			go func() {
				decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
				for {
					var msg cstructs.StreamErrWrapper
					if err := decoder.Decode(&msg); err != nil {
						if err == io.EOF || strings.Contains(err.Error(), "closed") {
							return
						}
						errCh <- fmt.Errorf("error decoding: %v", err)
					}

					streamMsg <- &msg
				}
			}()

			// Send the request
			encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
			require.Nil(encoder.Encode(req))

			timeout := time.After(5 * time.Second)

		OUTER:
			for {
				select {
				case <-timeout:
					t.Fatal("timeout")
				case err := <-errCh:
					t.Fatal(err)
				case msg := <-streamMsg:
					if msg.Error == nil {
						continue
					}

					if strings.Contains(msg.Error.Error(), c.ExpectedError) {
						break OUTER
					} else {
						t.Fatalf("Bad error: %v", msg.Error)
					}
				}
			}
		})
	}
}

func TestClientFS_Logs_Local(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expected := "Hello from the other side"
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":       "2s",
			"stdout_string": expected,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state := s.State()
	require.Nil(state.UpsertJob(999, a.Job))
	require.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Make the request
	req := &cstructs.FsLogsRequest{
		AllocID:      a.ID,
		Task:         a.Job.TaskGroups[0].Tasks[0].Name,
		LogType:      "stdout",
		Origin:       "start",
		PlainText:    true,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Logs")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(3 * time.Second)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Logs_Local_Follow(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s := TestServer(t, nil)
	defer s.Shutdown()
	testutil.WaitForLeader(t, s.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expectedBase := "Hello from the other side"
	repeat := 10

	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":                "20s",
			"stdout_string":          expectedBase,
			"stdout_repeat":          repeat,
			"stdout_repeat_duration": 200 * time.Millisecond,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state := s.State()
	require.Nil(state.UpsertJob(999, a.Job))
	require.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not running: %v", c.NodeID(), err)
	})

	// Make the request
	req := &cstructs.FsLogsRequest{
		AllocID:      a.ID,
		Task:         a.Job.TaskGroups[0].Tasks[0].Name,
		LogType:      "stdout",
		Origin:       "start",
		PlainText:    true,
		Follow:       true,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s.StreamingRpcHandler("FileSystem.Logs")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(20 * time.Second)
	expected := strings.Repeat(expectedBase, repeat+1)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Logs_Remote_Server(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s2.config.RPCAddr.String()}
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expected := "Hello from the other side"
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":       "2s",
			"stdout_string": expected,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state1 := s1.State()
	state2 := s2.State()
	require.Nil(state1.UpsertJob(999, a.Job))
	require.Nil(state1.UpsertAllocs(1003, []*structs.Allocation{a}))
	require.Nil(state2.UpsertJob(999, a.Job))
	require.Nil(state2.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state2.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Force remove the connection locally in case it exists
	s1.nodeConnsLock.Lock()
	delete(s1.nodeConns, c.NodeID())
	s1.nodeConnsLock.Unlock()

	// Make the request
	req := &cstructs.FsLogsRequest{
		AllocID:      a.ID,
		Task:         a.Job.TaskGroups[0].Tasks[0].Name,
		LogType:      "stdout",
		Origin:       "start",
		PlainText:    true,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// Get the handler
	handler, err := s1.StreamingRpcHandler("FileSystem.Logs")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(3 * time.Second)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}

func TestClientFS_Logs_Remote_Region(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Start a server and client
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.Region = "two"
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	c := client.TestClient(t, func(c *config.Config) {
		c.Servers = []string{s2.config.RPCAddr.String()}
		c.Region = "two"
	})
	defer c.Shutdown()

	// Force an allocation onto the node
	expected := "Hello from the other side"
	a := mock.Alloc()
	a.Job.Type = structs.JobTypeBatch
	a.NodeID = c.NodeID()
	a.Job.TaskGroups[0].Count = 1
	a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
		Name:   "web",
		Driver: "mock_driver",
		Config: map[string]interface{}{
			"run_for":       "2s",
			"stdout_string": expected,
		},
		LogConfig: structs.DefaultLogConfig(),
		Resources: &structs.Resources{
			CPU:      500,
			MemoryMB: 256,
		},
	}

	// Wait for the client to connect
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
		return len(nodes) == 1, nil
	}, func(err error) {
		t.Fatalf("should have a client")
	})

	// Upsert the allocation
	state2 := s2.State()
	require.Nil(state2.UpsertJob(999, a.Job))
	require.Nil(state2.UpsertAllocs(1003, []*structs.Allocation{a}))

	// Wait for the client to run the allocation
	testutil.WaitForResult(func() (bool, error) {
		alloc, err := state2.AllocByID(nil, a.ID)
		if err != nil {
			return false, err
		}
		if alloc == nil {
			return false, fmt.Errorf("unknown alloc")
		}
		if alloc.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("Alloc on node %q not finished: %v", c.NodeID(), err)
	})

	// Force remove the connection locally in case it exists
	s1.nodeConnsLock.Lock()
	delete(s1.nodeConns, c.NodeID())
	s1.nodeConnsLock.Unlock()

	// Make the request
	req := &cstructs.FsLogsRequest{
		AllocID:      a.ID,
		Task:         a.Job.TaskGroups[0].Tasks[0].Name,
		LogType:      "stdout",
		Origin:       "start",
		PlainText:    true,
		QueryOptions: structs.QueryOptions{Region: "two"},
	}

	// Get the handler
	handler, err := s1.StreamingRpcHandler("FileSystem.Logs")
	require.Nil(err)

	// Create a pipe
	p1, p2 := net.Pipe()
	defer p1.Close()
	defer p2.Close()

	errCh := make(chan error)
	streamMsg := make(chan *cstructs.StreamErrWrapper)

	// Start the handler
	go handler(p2)

	// Start the decoder
	go func() {
		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
		for {
			var msg cstructs.StreamErrWrapper
			if err := decoder.Decode(&msg); err != nil {
				if err == io.EOF || strings.Contains(err.Error(), "closed") {
					return
				}
				errCh <- fmt.Errorf("error decoding: %v", err)
			}

			streamMsg <- &msg
		}
	}()

	// Send the request
	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
	require.Nil(encoder.Encode(req))

	timeout := time.After(3 * time.Second)
	received := ""
OUTER:
	for {
		select {
		case <-timeout:
			t.Fatal("timeout")
		case err := <-errCh:
			t.Fatal(err)
		case msg := <-streamMsg:
			if msg.Error != nil {
				t.Fatalf("Got error: %v", msg.Error.Error())
			}

			// Add the payload
			received += string(msg.Payload)
			if received == expected {
				break OUTER
			}
		}
	}
}