github.com/bazelbuild/remote-apis-sdks@v0.0.0-20240425170053-8a36686a6350/go/pkg/client/cas_test.go (about) 1 package client_test 2 3 import ( 4 "bytes" 5 "context" 6 "encoding/binary" 7 "errors" 8 "fmt" 9 "math/rand" 10 "net" 11 "os" 12 "path/filepath" 13 14 "strings" 15 "testing" 16 "time" 17 18 "github.com/bazelbuild/remote-apis-sdks/go/pkg/client" 19 "github.com/bazelbuild/remote-apis-sdks/go/pkg/digest" 20 "github.com/bazelbuild/remote-apis-sdks/go/pkg/fakes" 21 "github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata" 22 "github.com/bazelbuild/remote-apis-sdks/go/pkg/portpicker" 23 "github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo" 24 "github.com/google/go-cmp/cmp" 25 "golang.org/x/sync/errgroup" 26 "google.golang.org/grpc" 27 "google.golang.org/grpc/codes" 28 "google.golang.org/grpc/status" 29 "google.golang.org/protobuf/proto" 30 31 // Redundant imports are required for the google3 mirror. Aliases should not be changed. 32 regrpc "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" 33 repb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" 34 bsgrpc "google.golang.org/genproto/googleapis/bytestream" 35 ) 36 37 const ( 38 instance = "instance" 39 defaultCASConcurrency = 50 40 reqMaxSleepDuration = 5 * time.Millisecond 41 ) 42 43 func TestSplitEndpoints(t *testing.T) { 44 t.Parallel() 45 ctx := context.Background() 46 l1, err := net.Listen("tcp", ":0") 47 if err != nil { 48 t.Fatalf("Cannot listen: %v", err) 49 } 50 port := portpicker.PickUnusedPortTB(t) 51 l2, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) 52 if err != nil { 53 t.Fatalf("Cannot listen: %v", err) 54 } 55 defer l1.Close() 56 defer l2.Close() 57 execServer := grpc.NewServer() 58 casServer := grpc.NewServer() 59 blob := []byte("foobar") 60 fake := &fakes.Reader{ 61 Blob: blob, 62 Chunks: []int{6}, 63 ExpectCompressed: false, 64 } 65 bsgrpc.RegisterByteStreamServer(casServer, fake) 66 go execServer.Serve(l1) 67 go casServer.Serve(l2) 68 defer casServer.Stop() 69 defer execServer.Stop() 70 c, err := client.NewClient(ctx, instance, client.DialParams{ 71 Service: l1.Addr().String(), 72 CASService: l2.Addr().String(), 73 NoSecurity: true, 74 }, client.StartupCapabilities(false)) 75 if err != nil { 76 t.Fatalf("Error connecting to server: %v", err) 77 } 78 defer c.Close() 79 80 got, _, err := c.ReadBlob(ctx, digest.NewFromBlob(blob)) 81 if err != nil { 82 t.Errorf("c.ReadBlob(ctx, digest) gave error %s, want nil", err) 83 } 84 if !bytes.Equal(blob, got) { 85 t.Errorf("c.ReadBlob(ctx, digest) gave diff: want %v, got %v", blob, got) 86 } 87 } 88 89 func TestReadEmptyBlobDoesNotCallServer(t *testing.T) { 90 t.Parallel() 91 ctx := context.Background() 92 e, cleanup := fakes.NewTestEnv(t) 93 defer cleanup() 94 fake := e.Server.CAS 95 c := e.Client.GrpcClient 96 97 got, _, err := c.ReadBlob(ctx, digest.Empty) 98 if err != nil { 99 t.Errorf("c.ReadBlob(ctx, Empty) gave error %s, want nil", err) 100 } 101 if len(got) != 0 { 102 t.Errorf("c.ReadBlob(ctx, Empty) gave diff: want nil, got %v", got) 103 } 104 reads := fake.BlobReads(digest.Empty) 105 if reads != 0 { 106 t.Errorf("expected no blob reads to the fake, got %v", reads) 107 } 108 } 109 110 func TestRead(t *testing.T) { 111 t.Parallel() 112 type testCase struct { 113 name string 114 fake fakes.Reader 115 offset int64 116 limit int64 117 compress bool 118 want []byte // If nil, fake.blob is expected by default. 119 } 120 tests := []testCase{ 121 { 122 name: "empty blob, 10 chunks", 123 fake: fakes.Reader{ 124 Blob: []byte{}, 125 Chunks: []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 126 }, 127 }, 128 { 129 name: "blob 'foobar', 1 chunk", 130 fake: fakes.Reader{ 131 Blob: []byte("foobar"), 132 Chunks: []int{6}, 133 }, 134 }, 135 { 136 name: "blob 'foobar', 3 evenly sized chunks", 137 fake: fakes.Reader{ 138 Blob: []byte("foobar"), 139 Chunks: []int{2, 2, 2}, 140 }, 141 }, 142 { 143 name: "blob 'foobar', 3 unequal chunks", 144 fake: fakes.Reader{ 145 Blob: []byte("foobar"), 146 Chunks: []int{1, 3, 2}, 147 }, 148 }, 149 { 150 name: "blob 'foobar', 2 chunks with 0-sized chunk between", 151 fake: fakes.Reader{ 152 Blob: []byte("foobar"), 153 Chunks: []int{3, 0, 3}, 154 }, 155 }, 156 { 157 name: "blob 'foobarbaz', partial read spanning multiple chunks", 158 fake: fakes.Reader{ 159 Blob: []byte("foobarbaz"), 160 Chunks: []int{3, 0, 3, 3}, 161 }, 162 offset: 2, 163 limit: 5, 164 want: []byte("obarb"), 165 }, 166 { 167 name: "blob 'foobar', partial read within chunk", 168 fake: fakes.Reader{ 169 Blob: []byte("foobar"), 170 Chunks: []int{6}, 171 }, 172 offset: 2, 173 limit: 3, 174 want: []byte("oba"), 175 }, 176 { 177 name: "blob 'foobar', partial read from start", 178 fake: fakes.Reader{ 179 Blob: []byte("foobar"), 180 Chunks: []int{3, 3}, 181 }, 182 offset: 0, 183 limit: 5, 184 want: []byte("fooba"), 185 }, 186 { 187 name: "blob 'foobar', partial read with no limit", 188 fake: fakes.Reader{ 189 Blob: []byte("foobar"), 190 Chunks: []int{3, 3}, 191 }, 192 offset: 2, 193 limit: 0, 194 want: []byte("obar"), 195 }, 196 { 197 name: "blob 'foobar', partial read with limit extending beyond end of blob", 198 fake: fakes.Reader{ 199 Blob: []byte("foobar"), 200 Chunks: []int{3, 3}, 201 }, 202 offset: 2, 203 limit: 8, 204 want: []byte("obar"), 205 }, 206 } 207 var compressionTests []testCase 208 for _, tc := range tests { 209 if tc.limit == 0 { 210 // Limit tests don't work well with compression, as the limit refers to the compressed bytes 211 // while offset, per spec, refers to uncompressed bytes. 212 tc.compress = true 213 tc.name = tc.name + "_compressed" 214 compressionTests = append(compressionTests, tc) 215 } 216 } 217 tests = append(tests, compressionTests...) 218 219 for _, tc := range tests { 220 tc := tc 221 t.Run(tc.name, func(t *testing.T) { 222 t.Parallel() 223 ctx := context.Background() 224 listener, err := net.Listen("tcp", ":0") 225 if err != nil { 226 t.Fatalf("Cannot listen: %v", err) 227 } 228 defer listener.Close() 229 server := grpc.NewServer() 230 bsgrpc.RegisterByteStreamServer(server, &tc.fake) 231 go server.Serve(listener) 232 defer server.Stop() 233 c, err := client.NewClient(ctx, instance, client.DialParams{ 234 Service: listener.Addr().String(), 235 NoSecurity: true, 236 }, client.StartupCapabilities(false)) 237 if err != nil { 238 t.Fatalf("Error connecting to server: %v", err) 239 } 240 defer c.Close() 241 242 tc.fake.Validate(t) 243 244 c.CompressedBytestreamThreshold = -1 245 if tc.compress { 246 c.CompressedBytestreamThreshold = 0 247 } 248 tc.fake.ExpectCompressed = tc.compress 249 250 want := tc.want 251 if want == nil { 252 want = tc.fake.Blob 253 } 254 255 if tc.offset == 0 && tc.limit > int64(len(tc.fake.Blob)) { 256 got, stats, err := c.ReadBlob(ctx, digest.NewFromBlob(want)) 257 if err != nil { 258 t.Errorf("c.ReadBlob(ctx, digest) gave error %s, want nil", err) 259 } 260 if !bytes.Equal(want, got) { 261 t.Errorf("c.ReadBlob(ctx, digest) gave diff: want %v, got %v", want, got) 262 } 263 if int64(len(got)) != stats.LogicalMoved { 264 t.Errorf("c.ReadBlob(ctx, digest) = _, %v - logical bytes moved different than len of blob received", stats.LogicalMoved) 265 } 266 if tc.compress && len(tc.fake.Blob) > 0 && stats.LogicalMoved == stats.RealMoved { 267 t.Errorf("c.ReadBlob(ctx, digest) = %v - compression on but different real and logical bytes", stats) 268 } 269 } 270 271 got, stats, err := c.ReadBlobRange(ctx, digest.NewFromBlob(tc.fake.Blob), tc.offset, tc.limit) 272 if err != nil { 273 t.Errorf("c.ReadBlobRange(ctx, digest, %d, %d) gave error %s, want nil", tc.offset, tc.limit, err) 274 } 275 if !bytes.Equal(want, got) { 276 t.Errorf("c.ReadBlobRange(ctx, digest, %d, %d) gave diff: want %v, got %v", tc.offset, tc.limit, want, got) 277 } 278 if int64(len(got)) != stats.LogicalMoved { 279 t.Errorf("c.ReadBlob(ctx, digest) = _, %v - logical bytes moved different than len of blob received", stats.LogicalMoved) 280 } 281 if tc.compress && len(tc.fake.Blob) > 0 && stats.LogicalMoved == stats.RealMoved { 282 t.Errorf("c.ReadBlob(ctx, digest) = %v - compression on but same real and logical bytes", stats) 283 } 284 }) 285 } 286 } 287 288 func TestWrite(t *testing.T) { 289 t.Parallel() 290 type testcase struct { 291 name string 292 blob []byte 293 cmp client.CompressedBytestreamThreshold 294 } 295 tests := []testcase{ 296 { 297 name: "empty blob", 298 blob: []byte{}, 299 }, 300 { 301 name: "small blob", 302 blob: []byte("this is a pretty small blob comparatively"), 303 }, 304 { 305 name: "5MB zero blob", 306 blob: make([]byte, 5*1024*1024), 307 }, 308 } 309 var allTests []testcase 310 for _, tc := range tests { 311 for _, th := range []int{0, -1} { 312 t := tc 313 t.name += fmt.Sprintf("CompressionThreshold=%d", th) 314 t.cmp = client.CompressedBytestreamThreshold(th) 315 allTests = append(allTests, t) 316 } 317 } 318 319 for _, tc := range allTests { 320 tc := tc 321 t.Run(tc.name, func(t *testing.T) { 322 t.Parallel() 323 ctx := context.Background() 324 listener, err := net.Listen("tcp", ":0") 325 if err != nil { 326 t.Fatalf("Cannot listen: %v", err) 327 } 328 defer listener.Close() 329 server := grpc.NewServer() 330 fake := &fakes.Writer{} 331 bsgrpc.RegisterByteStreamServer(server, fake) 332 go server.Serve(listener) 333 defer server.Stop() 334 c, err := client.NewClient(ctx, instance, client.DialParams{ 335 Service: listener.Addr().String(), 336 NoSecurity: true, 337 }, client.StartupCapabilities(false), client.ChunkMaxSize(20)) // Use small write chunk size for tests. 338 if err != nil { 339 t.Fatalf("Error connecting to server: %v", err) 340 } 341 defer c.Close() 342 343 fake.ExpectCompressed = int(tc.cmp) == 0 344 tc.cmp.Apply(c) 345 346 gotDg, err := c.WriteBlob(ctx, tc.blob) 347 if err != nil { 348 t.Errorf("c.WriteBlob(ctx, blob) gave error %s, wanted nil", err) 349 } 350 if fake.Err != nil { 351 t.Errorf("c.WriteBlob(ctx, blob) caused the server to return error %s (possibly unseen by c)", fake.Err) 352 } 353 if !bytes.Equal(tc.blob, fake.Buf) { 354 t.Errorf("c.WriteBlob(ctx, blob) had diff on blobs, want %v, got %v:", tc.blob, fake.Buf) 355 } 356 dg := digest.NewFromBlob(tc.blob) 357 if dg != gotDg { 358 t.Errorf("c.WriteBlob(ctx, blob) had diff on digest returned (want %s, got %s)", dg, gotDg) 359 } 360 }) 361 } 362 } 363 364 func TestMissingBlobs(t *testing.T) { 365 t.Parallel() 366 tests := []struct { 367 name string 368 // present is the blobs present in the CAS. 369 present []string 370 // input is the digests given to MissingBlobs. 371 input []digest.Digest 372 // want is the returned list of digests. 373 want []digest.Digest 374 }{ 375 { 376 name: "none present", 377 present: nil, 378 input: []digest.Digest{ 379 digest.NewFromBlob([]byte("foo")), 380 digest.NewFromBlob([]byte("bar")), 381 digest.NewFromBlob([]byte("baz")), 382 }, 383 want: []digest.Digest{ 384 digest.NewFromBlob([]byte("foo")), 385 digest.NewFromBlob([]byte("bar")), 386 digest.NewFromBlob([]byte("baz")), 387 }, 388 }, 389 { 390 name: "all present", 391 present: []string{"foo", "bar", "baz"}, 392 input: []digest.Digest{ 393 digest.NewFromBlob([]byte("foo")), 394 digest.NewFromBlob([]byte("bar")), 395 digest.NewFromBlob([]byte("baz")), 396 }, 397 want: nil, 398 }, 399 { 400 name: "some present", 401 present: []string{"foo", "bar"}, 402 input: []digest.Digest{ 403 digest.NewFromBlob([]byte("foo")), 404 digest.NewFromBlob([]byte("bar")), 405 digest.NewFromBlob([]byte("baz")), 406 }, 407 want: []digest.Digest{ 408 digest.NewFromBlob([]byte("baz")), 409 }, 410 }, 411 } 412 413 for _, tc := range tests { 414 tc := tc 415 t.Run(tc.name, func(t *testing.T) { 416 t.Parallel() 417 ctx := context.Background() 418 e, cleanup := fakes.NewTestEnv(t) 419 defer cleanup() 420 fake := e.Server.CAS 421 c := e.Client.GrpcClient 422 for _, s := range tc.present { 423 fake.Put([]byte(s)) 424 } 425 t.Logf("CAS contains digests of %s", tc.present) 426 got, err := c.MissingBlobs(ctx, tc.input) 427 if err != nil { 428 t.Errorf("c.MissingBlobs(ctx, %v) gave error %s, expected nil", tc.input, err) 429 } 430 if diff := cmp.Diff(tc.want, got); diff != "" { 431 t.Errorf("c.MissingBlobs(ctx, %v) gave diff (want -> got):\n%s", tc.input, diff) 432 } 433 }) 434 } 435 } 436 437 func TestUploadConcurrent(t *testing.T) { 438 t.Parallel() 439 blobs := make([][]byte, 50) 440 for i := range blobs { 441 blobs[i] = []byte(fmt.Sprint(i)) 442 } 443 type testCase struct { 444 name string 445 // Whether to use batching. 446 batching client.UseBatchOps 447 // Whether to use background CAS ops. 448 unified client.UnifiedUploads 449 // The batch size. 450 maxBatchDigests client.MaxBatchDigests 451 // The CAS concurrency for uploading the blobs. 452 concurrency client.CASConcurrency 453 } 454 var tests []testCase 455 for _, ub := range []client.UseBatchOps{false, true} { 456 for _, cb := range []client.UnifiedUploads{false, true} { 457 for _, conc := range []client.CASConcurrency{3, 100} { 458 tc := testCase{ 459 name: fmt.Sprintf("batch:%t,unified:%t,conc:%d", ub, cb, conc), 460 batching: ub, 461 unified: cb, 462 maxBatchDigests: client.MaxBatchDigests(9), 463 concurrency: conc, 464 } 465 tests = append(tests, tc) 466 } 467 } 468 } 469 for _, tc := range tests { 470 tc := tc 471 t.Run(tc.name, func(t *testing.T) { 472 t.Parallel() 473 ctx := context.Background() 474 e, cleanup := fakes.NewTestEnv(t) 475 defer cleanup() 476 fake := e.Server.CAS 477 fake.ReqSleepDuration = reqMaxSleepDuration 478 fake.ReqSleepRandomize = true 479 c := e.Client.GrpcClient 480 for _, opt := range []client.Opt{tc.batching, tc.maxBatchDigests, tc.concurrency, tc.unified} { 481 opt.Apply(c) 482 } 483 c.RunBackgroundTasks(ctx) 484 485 eg, eCtx := errgroup.WithContext(ctx) 486 for i := 0; i < 100; i++ { 487 eg.Go(func() error { 488 var input []*uploadinfo.Entry 489 for _, blob := range append(blobs, blobs...) { 490 input = append(input, uploadinfo.EntryFromBlob(blob)) 491 } 492 if _, _, err := c.UploadIfMissing(eCtx, input...); err != nil { 493 return fmt.Errorf("c.UploadIfMissing(ctx, input) gave error %v, expected nil", err) 494 } 495 return nil 496 }) 497 } 498 if err := eg.Wait(); err != nil { 499 t.Error(err) 500 } 501 // Verify everything was written exactly once. 502 for i, blob := range blobs { 503 dg := digest.NewFromBlob(blob) 504 if tc.unified { 505 if fake.BlobWrites(dg) != 1 { 506 t.Errorf("wanted 1 write for blob %v: %v, got %v", i, dg, fake.BlobWrites(dg)) 507 } 508 if fake.BlobMissingReqs(dg) != 100 { 509 // 100 requests per blob. 510 t.Errorf("wanted 100 missing request for blob %v: %v, got %v", i, dg, fake.BlobMissingReqs(dg)) 511 } 512 } 513 } 514 }) 515 } 516 } 517 518 func TestUploadConcurrentBatch(t *testing.T) { 519 t.Parallel() 520 blobs := make([][]byte, 100) 521 for i := range blobs { 522 blobs[i] = []byte(fmt.Sprint(i)) 523 } 524 ctx := context.Background() 525 for _, uo := range []client.UnifiedUploads{false, true} { 526 uo := uo 527 t.Run(fmt.Sprintf("unified:%t", uo), func(t *testing.T) { 528 t.Parallel() 529 e, cleanup := fakes.NewTestEnv(t) 530 defer cleanup() 531 fake := e.Server.CAS 532 fake.ReqSleepDuration = reqMaxSleepDuration 533 fake.ReqSleepRandomize = true 534 c := e.Client.GrpcClient 535 c.MaxBatchDigests = 50 536 client.UnifiedUploadTickDuration(500 * time.Millisecond).Apply(c) 537 uo.Apply(c) 538 c.RunBackgroundTasks(ctx) 539 540 eg, eCtx := errgroup.WithContext(ctx) 541 for i := 0; i < 10; i++ { 542 i := i 543 eg.Go(func() error { 544 var input []*uploadinfo.Entry 545 // Upload 15 digests in a sliding window. 546 for j := i * 10; j < i*10+15 && j < len(blobs); j++ { 547 input = append(input, uploadinfo.EntryFromBlob(blobs[j])) 548 // Twice to have the same upload in same call, in addition to between calls. 549 input = append(input, uploadinfo.EntryFromBlob(blobs[j])) 550 } 551 if _, _, err := c.UploadIfMissing(eCtx, input...); err != nil { 552 return fmt.Errorf("c.UploadIfMissing(ctx, input) gave error %v, expected nil", err) 553 } 554 return nil 555 }) 556 } 557 if err := eg.Wait(); err != nil { 558 t.Error(err) 559 } 560 // Verify everything was written exactly once. 561 for i, blob := range blobs { 562 dg := digest.NewFromBlob(blob) 563 if c.UnifiedUploads { 564 if fake.BlobWrites(dg) != 1 { 565 t.Errorf("wanted 1 write for blob %v: %v, got %v", i, dg, fake.BlobWrites(dg)) 566 } 567 } 568 } 569 expectedReqs := 10 570 if c.UnifiedUploads { 571 // All the 100 digests will be batched into two batches, together. 572 expectedReqs = 2 573 } 574 if fake.BatchReqs() != expectedReqs { 575 t.Errorf("%d requests were made to BatchUpdateBlobs, wanted %v", fake.BatchReqs(), expectedReqs) 576 } 577 }) 578 } 579 } 580 581 func TestUploadCancel(t *testing.T) { 582 t.Parallel() 583 ctx := context.Background() 584 blob := []byte{1, 2, 3} 585 dg := digest.NewFromBlob(blob) 586 for _, uo := range []client.UnifiedUploads{false, true} { 587 uo := uo 588 t.Run(fmt.Sprintf("unified:%t", uo), func(t *testing.T) { 589 t.Parallel() 590 e, cleanup := fakes.NewTestEnv(t) 591 defer cleanup() 592 fake := e.Server.CAS 593 wait := make(chan bool) 594 fake.PerDigestBlockFn[dg] = func() { 595 <-wait 596 } 597 c := e.Client.GrpcClient 598 uo.Apply(c) 599 client.UseBatchOps(false).Apply(c) 600 c.RunBackgroundTasks(ctx) 601 602 cCtx, cancel := context.WithCancel(ctx) 603 eg, _ := errgroup.WithContext(cCtx) 604 ue := uploadinfo.EntryFromBlob(blob) 605 eg.Go(func() error { 606 if _, _, err := c.UploadIfMissing(cCtx, ue); !errors.Is(err, context.Canceled) { 607 return fmt.Errorf("c.UploadIfMissing(ctx, input) gave error %v, expected to wrap context.Canceled", err) 608 } 609 return nil 610 }) 611 eg.Go(func() error { 612 time.Sleep(60 * time.Millisecond) // Enough time to trigger upload cycle. 613 cancel() 614 time.Sleep(10 * time.Millisecond) 615 return nil 616 }) 617 if err := eg.Wait(); err != nil { 618 t.Error(err) 619 } 620 // Verify that nothing was written. 621 if fake.BlobWrites(ue.Digest) != 0 { 622 t.Errorf("Blob was written, expected cancellation.") 623 } 624 close(wait) 625 }) 626 } 627 } 628 629 func TestUploadConcurrentCancel(t *testing.T) { 630 t.Parallel() 631 blobs := make([][]byte, 50) 632 for i := range blobs { 633 blobs[i] = []byte(fmt.Sprint(i)) 634 } 635 var input []*uploadinfo.Entry 636 for _, blob := range blobs { 637 input = append(input, uploadinfo.EntryFromBlob(blob)) 638 } 639 input = append(input, input...) 640 type testCase struct { 641 name string 642 // Whether to use batching. 643 batching client.UseBatchOps 644 // Whether to use background CAS ops. 645 unified client.UnifiedUploads 646 // The batch size. 647 maxBatchDigests client.MaxBatchDigests 648 // The CAS concurrency for uploading the blobs. 649 concurrency client.CASConcurrency 650 } 651 var tests []testCase 652 for _, ub := range []client.UseBatchOps{false, true} { 653 for _, uo := range []client.UnifiedUploads{false, true} { 654 for _, conc := range []client.CASConcurrency{3, 20} { 655 tc := testCase{ 656 name: fmt.Sprintf("batch:%t,unified:%t,conc:%d", ub, uo, conc), 657 batching: ub, 658 unified: uo, 659 maxBatchDigests: client.MaxBatchDigests(9), 660 concurrency: conc, 661 } 662 tests = append(tests, tc) 663 } 664 } 665 } 666 for _, tc := range tests { 667 tc := tc 668 t.Run(tc.name, func(t *testing.T) { 669 t.Parallel() 670 ctx := context.Background() 671 e, cleanup := fakes.NewTestEnv(t) 672 defer cleanup() 673 fake := e.Server.CAS 674 fake.ReqSleepDuration = reqMaxSleepDuration 675 fake.ReqSleepRandomize = true 676 c := e.Client.GrpcClient 677 for _, opt := range []client.Opt{tc.batching, tc.maxBatchDigests, tc.concurrency, tc.unified} { 678 opt.Apply(c) 679 } 680 c.RunBackgroundTasks(ctx) 681 682 eg, eCtx := errgroup.WithContext(ctx) 683 eg.Go(func() error { 684 if _, _, err := c.UploadIfMissing(eCtx, input...); err != nil { 685 return fmt.Errorf("c.UploadIfMissing(ctx, input) gave error %v, expected nil", err) 686 } 687 return nil 688 }) 689 cCtx, cancel := context.WithCancel(eCtx) 690 for i := 0; i < 50; i++ { 691 eg.Go(func() error { 692 // Verify that we got a context cancellation error. Sometimes, the request can succeed, if the original thread takes a while to run. 693 if _, _, err := c.UploadIfMissing(cCtx, input...); err != nil && !errors.Is(err, context.Canceled) { 694 return fmt.Errorf("c.UploadIfMissing(ctx, input) gave error %+v!, expected context canceled", err) 695 } 696 return nil 697 }) 698 } 699 eg.Go(func() error { 700 time.Sleep(time.Duration(20*rand.Float32()) * time.Microsecond) 701 cancel() 702 return nil 703 }) 704 if err := eg.Wait(); err != nil { 705 t.Error(err) 706 } 707 if tc.unified { 708 // Verify everything was written exactly once, despite the context being canceled. 709 for i, blob := range blobs { 710 dg := digest.NewFromBlob(blob) 711 if fake.BlobWrites(dg) != 1 { 712 t.Errorf("wanted 1 write for blob %v: %v, got %v", i, dg, fake.BlobWrites(dg)) 713 } 714 } 715 } 716 }) 717 } 718 } 719 720 func TestUpload(t *testing.T) { 721 t.Parallel() 722 var twoThousandBlobs [][]byte 723 var thousandBlobs [][]byte 724 for i := 0; i < 2000; i++ { 725 var buf = new(bytes.Buffer) 726 binary.Write(buf, binary.LittleEndian, i) 727 // Write a few extra bytes so that we have > chunkSize sized blobs. 728 for j := 0; j < 10; j++ { 729 binary.Write(buf, binary.LittleEndian, 0) 730 } 731 twoThousandBlobs = append(twoThousandBlobs, buf.Bytes()) 732 if i%2 == 0 { 733 thousandBlobs = append(thousandBlobs, buf.Bytes()) 734 } 735 } 736 737 type testcase struct { 738 name string 739 // input is the blobs to try to store; they're converted to a file map by the test 740 input [][]byte 741 // present is the blobs already present in the CAS; they're pre-loaded into the fakes.CAS object 742 // and the test verifies no attempt was made to upload them. 743 present [][]byte 744 opts []client.Opt 745 } 746 tests := []testcase{ 747 { 748 name: "No blobs", 749 input: nil, 750 present: nil, 751 }, 752 { 753 name: "None present", 754 input: [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}, 755 present: nil, 756 }, 757 { 758 name: "All present", 759 input: [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}, 760 present: [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}, 761 }, 762 { 763 name: "Some present", 764 input: [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}, 765 present: [][]byte{[]byte("bar")}, 766 }, 767 { 768 name: "2000 blobs heavy concurrency", 769 input: twoThousandBlobs, 770 present: thousandBlobs, 771 opts: []client.Opt{client.CASConcurrency(500)}, 772 }, 773 } 774 775 var allTests []testcase 776 for _, tc := range tests { 777 for _, ub := range []client.UseBatchOps{false, true} { 778 for _, uo := range []client.UnifiedUploads{false, true} { 779 for _, cmp := range []client.CompressedBytestreamThreshold{-1, 0} { 780 t := tc 781 t.name = fmt.Sprintf("%s_UsingBatch:%t,UnifiedUploads:%t,CompressionThresh:%d", tc.name, ub, uo, cmp) 782 t.opts = append(t.opts, []client.Opt{ub, uo, cmp}...) 783 allTests = append(allTests, t) 784 } 785 } 786 } 787 } 788 for _, tc := range allTests { 789 tc := tc 790 t.Run(tc.name, func(t *testing.T) { 791 t.Parallel() 792 ctx := context.Background() 793 e, cleanup := fakes.NewTestEnv(t) 794 defer cleanup() 795 fake := e.Server.CAS 796 c := e.Client.GrpcClient 797 for _, o := range tc.opts { 798 o.Apply(c) 799 } 800 c.RunBackgroundTasks(ctx) 801 802 present := make(map[digest.Digest]bool) 803 for _, blob := range tc.present { 804 fake.Put(blob) 805 present[digest.NewFromBlob(blob)] = true 806 } 807 var input []*uploadinfo.Entry 808 for _, blob := range tc.input { 809 input = append(input, uploadinfo.EntryFromBlob(blob)) 810 } 811 812 missing, bMoved, err := c.UploadIfMissing(ctx, input...) 813 if err != nil { 814 t.Errorf("c.UploadIfMissing(ctx, input) gave error %v, expected nil", err) 815 } 816 817 missingSet := make(map[digest.Digest]struct{}) 818 totalBytes := int64(0) 819 for _, dg := range missing { 820 missingSet[dg] = struct{}{} 821 totalBytes += dg.Size 822 } 823 824 // It's much harder to check the case where compression is on as we also have to ignore batch ops, 825 // so we just don't. 826 if int(c.CompressedBytestreamThreshold) < 0 && bMoved != totalBytes { 827 t.Errorf("c.UploadIfMissing(ctx, input) = %v, expected %v (reported different bytes moved and digest size despite no compression)", bMoved, totalBytes) 828 } 829 830 for i, ue := range input { 831 dg := ue.Digest 832 blob := tc.input[i] 833 if present[dg] { 834 if fake.BlobWrites(dg) > 0 { 835 t.Errorf("blob %v with digest %s was uploaded even though it was already present in the CAS", blob, dg) 836 } 837 if _, ok := missingSet[dg]; ok { 838 t.Errorf("Stats said that blob %v with digest %s was missing in the CAS", blob, dg) 839 } 840 continue 841 } 842 if gotBlob, ok := fake.Get(dg); !ok { 843 t.Errorf("blob %v with digest %s was not uploaded, expected it to be present in the CAS", blob, dg) 844 } else if !bytes.Equal(blob, gotBlob) { 845 t.Errorf("blob digest %s had diff on uploaded blob: want %v, got %v", dg, blob, gotBlob) 846 } 847 if _, ok := missingSet[dg]; !ok { 848 t.Errorf("Stats said that blob %v with digest %s was present in the CAS", blob, dg) 849 } 850 } 851 if fake.MaxConcurrency() > defaultCASConcurrency { 852 t.Errorf("CAS concurrency %v was higher than max %v", fake.MaxConcurrency(), defaultCASConcurrency) 853 } 854 }) 855 } 856 } 857 858 func TestWriteBlobsBatching(t *testing.T) { 859 t.Parallel() 860 ctx := context.Background() 861 tests := []struct { 862 name string 863 sizes []int 864 batchReqs int 865 writeReqs int 866 }{ 867 { 868 name: "single small blob", 869 sizes: []int{1}, 870 batchReqs: 0, 871 writeReqs: 1, 872 }, 873 { 874 name: "large and small blobs hitting max exactly", 875 sizes: []int{338, 338, 338, 1, 1, 1}, 876 batchReqs: 3, 877 writeReqs: 0, 878 }, 879 { 880 name: "small batches of big blobs", 881 sizes: []int{88, 88, 88, 88, 88, 88, 88}, 882 batchReqs: 2, 883 writeReqs: 1, 884 }, 885 { 886 name: "batch with blob that's too big", 887 sizes: []int{400, 88, 88, 88}, 888 batchReqs: 1, 889 writeReqs: 1, 890 }, 891 { 892 name: "many small blobs hitting max digests", 893 sizes: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 894 batchReqs: 4, 895 writeReqs: 0, 896 }, 897 } 898 899 for _, tc := range tests { 900 tc := tc 901 t.Run(tc.name, func(t *testing.T) { 902 t.Parallel() 903 e, cleanup := fakes.NewTestEnv(t) 904 defer cleanup() 905 fake := e.Server.CAS 906 c := e.Client.GrpcClient 907 c.MaxBatchSize = 500 908 c.MaxBatchDigests = 4 909 // Each batch request frame overhead is 13 bytes. 910 // A per-blob overhead is 74 bytes. 911 912 blobs := make(map[digest.Digest][]byte) 913 for i, sz := range tc.sizes { 914 blob := make([]byte, int(sz)) 915 blob[0] = byte(i) // Ensure blobs are distinct 916 blobs[digest.NewFromBlob(blob)] = blob 917 } 918 919 err := c.WriteBlobs(ctx, blobs) 920 if err != nil { 921 t.Fatalf("c.WriteBlobs(ctx, inputs) gave error %s, expected nil", err) 922 } 923 924 for d, blob := range blobs { 925 if gotBlob, ok := fake.Get(d); !ok { 926 t.Errorf("blob with digest %s was not uploaded, expected it to be present in the CAS", d) 927 } else if !bytes.Equal(blob, gotBlob) { 928 t.Errorf("blob with digest %s had diff on uploaded blob: wanted %v, got %v", d, blob, gotBlob) 929 } 930 } 931 if fake.BatchReqs() != tc.batchReqs { 932 t.Errorf("%d requests were made to BatchUpdateBlobs, wanted %d", fake.BatchReqs(), tc.batchReqs) 933 } 934 if fake.WriteReqs() != tc.writeReqs { 935 t.Errorf("%d requests were made to Write, wanted %d", fake.WriteReqs(), tc.writeReqs) 936 } 937 }) 938 } 939 } 940 941 func TestFlattenActionOutputs(t *testing.T) { 942 t.Parallel() 943 ctx := context.Background() 944 e, cleanup := fakes.NewTestEnv(t) 945 defer cleanup() 946 fake := e.Server.CAS 947 c := e.Client.GrpcClient 948 949 fooDigest := digest.TestNew("1001", 1) 950 barDigest := digest.TestNew("1002", 2) 951 dirB := &repb.Directory{ 952 Files: []*repb.FileNode{ 953 {Name: "foo", Digest: fooDigest.ToProto(), IsExecutable: true}, 954 }, 955 } 956 bDigest := digest.TestNewFromMessage(dirB) 957 dirA := &repb.Directory{ 958 Directories: []*repb.DirectoryNode{ 959 {Name: "b", Digest: bDigest.ToProto()}, 960 }, 961 Files: []*repb.FileNode{ 962 {Name: "bar", Digest: barDigest.ToProto()}, 963 }, 964 } 965 aDigest := digest.TestNewFromMessage(dirA) 966 root := &repb.Directory{ 967 Directories: []*repb.DirectoryNode{ 968 {Name: "a", Digest: aDigest.ToProto()}, 969 {Name: "b", Digest: bDigest.ToProto()}, 970 }, 971 } 972 tr := &repb.Tree{ 973 Root: root, 974 Children: []*repb.Directory{dirA, dirB}, 975 } 976 treeBlob, err := proto.Marshal(tr) 977 if err != nil { 978 t.Errorf("failed marshalling Tree: %s", err) 979 } 980 treeA := &repb.Tree{ 981 Root: dirA, 982 Children: []*repb.Directory{dirB}, 983 } 984 treeABlob, err := proto.Marshal(treeA) 985 if err != nil { 986 t.Errorf("failed marshalling Tree: %s", err) 987 } 988 treeDigest := fake.Put(treeBlob) 989 treeADigest := fake.Put(treeABlob) 990 ar := &repb.ActionResult{ 991 OutputFiles: []*repb.OutputFile{ 992 &repb.OutputFile{Path: "foo", Digest: fooDigest.ToProto()}}, 993 OutputFileSymlinks: []*repb.OutputSymlink{ 994 &repb.OutputSymlink{Path: "x/bar", Target: "../dir/a/bar"}}, 995 OutputDirectorySymlinks: []*repb.OutputSymlink{ 996 &repb.OutputSymlink{Path: "x/a", Target: "../dir/a"}}, 997 OutputDirectories: []*repb.OutputDirectory{ 998 &repb.OutputDirectory{Path: "dir", TreeDigest: treeDigest.ToProto()}, 999 &repb.OutputDirectory{Path: "dir2", TreeDigest: treeADigest.ToProto()}, 1000 }, 1001 } 1002 outputs, err := c.FlattenActionOutputs(ctx, ar) 1003 if err != nil { 1004 t.Errorf("error in FlattenActionOutputs: %s", err) 1005 } 1006 wantOutputs := map[string]*client.TreeOutput{ 1007 "dir/a/b/foo": &client.TreeOutput{Digest: fooDigest, IsExecutable: true}, 1008 "dir/a/bar": &client.TreeOutput{Digest: barDigest}, 1009 "dir/b/foo": &client.TreeOutput{Digest: fooDigest, IsExecutable: true}, 1010 "dir2/b/foo": &client.TreeOutput{Digest: fooDigest, IsExecutable: true}, 1011 "dir2/bar": &client.TreeOutput{Digest: barDigest}, 1012 "foo": &client.TreeOutput{Digest: fooDigest}, 1013 "x/a": &client.TreeOutput{SymlinkTarget: "../dir/a"}, 1014 "x/bar": &client.TreeOutput{SymlinkTarget: "../dir/a/bar"}, 1015 } 1016 if len(outputs) != len(wantOutputs) { 1017 t.Errorf("FlattenActionOutputs gave wrong number of outputs: want %d, got %d", len(wantOutputs), len(outputs)) 1018 } 1019 for path, wantOut := range wantOutputs { 1020 got, ok := outputs[path] 1021 if !ok { 1022 t.Errorf("expected output %s is missing", path) 1023 } 1024 if got.Path != path { 1025 t.Errorf("FlattenActionOutputs keyed %s output with %s path", got.Path, path) 1026 } 1027 if wantOut.Digest != got.Digest { 1028 t.Errorf("FlattenActionOutputs gave digest diff on %s: want %v, got: %v", path, wantOut.Digest, got.Digest) 1029 } 1030 if wantOut.IsExecutable != got.IsExecutable { 1031 t.Errorf("FlattenActionOutputs gave IsExecutable diff on %s: want %v, got: %v", path, wantOut.IsExecutable, got.IsExecutable) 1032 } 1033 if wantOut.SymlinkTarget != got.SymlinkTarget { 1034 t.Errorf("FlattenActionOutputs gave symlink target diff on %s: want %s, got: %s", path, wantOut.SymlinkTarget, got.SymlinkTarget) 1035 } 1036 } 1037 } 1038 1039 func TestDownloadActionOutputs(t *testing.T) { 1040 t.Parallel() 1041 ctx := context.Background() 1042 e, cleanup := fakes.NewTestEnv(t) 1043 defer cleanup() 1044 fake := e.Server.CAS 1045 c := e.Client.GrpcClient 1046 cache := filemetadata.NewSingleFlightCache() 1047 1048 fooDigest := fake.Put([]byte("foo")) 1049 barDigest := fake.Put([]byte("bar")) 1050 dirB := &repb.Directory{ 1051 Files: []*repb.FileNode{ 1052 {Name: "foo", Digest: fooDigest.ToProto(), IsExecutable: true}, 1053 }, 1054 } 1055 bDigest := digest.TestNewFromMessage(dirB) 1056 dirA := &repb.Directory{ 1057 Directories: []*repb.DirectoryNode{ 1058 {Name: "b", Digest: bDigest.ToProto()}, 1059 {Name: "e2", Digest: digest.Empty.ToProto()}, 1060 }, 1061 Files: []*repb.FileNode{ 1062 {Name: "bar", Digest: barDigest.ToProto()}, 1063 }, 1064 } 1065 aDigest := digest.TestNewFromMessage(dirA) 1066 root := &repb.Directory{ 1067 Directories: []*repb.DirectoryNode{ 1068 {Name: "a", Digest: aDigest.ToProto()}, 1069 {Name: "b", Digest: bDigest.ToProto()}, 1070 {Name: "e1", Digest: digest.Empty.ToProto()}, 1071 }, 1072 } 1073 tree := &repb.Tree{ 1074 Root: root, 1075 Children: []*repb.Directory{dirA, dirB, &repb.Directory{}}, 1076 } 1077 treeBlob, err := proto.Marshal(tree) 1078 if err != nil { 1079 t.Fatalf("failed marshalling Tree: %s", err) 1080 } 1081 treeA := &repb.Tree{ 1082 Root: dirA, 1083 Children: []*repb.Directory{dirB, &repb.Directory{}}, 1084 } 1085 treeABlob, err := proto.Marshal(treeA) 1086 if err != nil { 1087 t.Fatalf("failed marshalling Tree: %s", err) 1088 } 1089 treeDigest := fake.Put(treeBlob) 1090 treeADigest := fake.Put(treeABlob) 1091 ar := &repb.ActionResult{ 1092 OutputFiles: []*repb.OutputFile{ 1093 &repb.OutputFile{Path: "../foo", Digest: fooDigest.ToProto()}}, 1094 OutputFileSymlinks: []*repb.OutputSymlink{ 1095 &repb.OutputSymlink{Path: "x/bar", Target: "../dir/a/bar"}}, 1096 OutputDirectorySymlinks: []*repb.OutputSymlink{ 1097 &repb.OutputSymlink{Path: "x/a", Target: "../dir/a"}}, 1098 OutputDirectories: []*repb.OutputDirectory{ 1099 &repb.OutputDirectory{Path: "dir", TreeDigest: treeDigest.ToProto()}, 1100 &repb.OutputDirectory{Path: "dir2", TreeDigest: treeADigest.ToProto()}, 1101 }, 1102 } 1103 execRoot := t.TempDir() 1104 wd := "wd" 1105 if err := os.Mkdir(filepath.Join(execRoot, wd), os.ModePerm); err != nil { 1106 t.Fatalf("failed to create working directory %v: %v", wd, err) 1107 } 1108 _, err = c.DownloadActionOutputs(ctx, ar, filepath.Join(execRoot, wd), cache) 1109 if err != nil { 1110 t.Errorf("error in DownloadActionOutputs: %s", err) 1111 } 1112 wantOutputs := []struct { 1113 path string 1114 isExecutable bool 1115 contents []byte 1116 symlinkTarget string 1117 isEmptyDirectory bool 1118 fileDigest *digest.Digest 1119 }{ 1120 { 1121 path: "wd/dir/e1", 1122 isEmptyDirectory: true, 1123 }, 1124 { 1125 path: "wd/dir/a/e2", 1126 isEmptyDirectory: true, 1127 }, 1128 { 1129 path: "wd/dir/a/b/foo", 1130 isExecutable: true, 1131 contents: []byte("foo"), 1132 fileDigest: &fooDigest, 1133 }, 1134 { 1135 path: "wd/dir/a/bar", 1136 contents: []byte("bar"), 1137 }, 1138 { 1139 path: "wd/dir/b/foo", 1140 isExecutable: true, 1141 contents: []byte("foo"), 1142 fileDigest: &fooDigest, 1143 }, 1144 { 1145 path: "wd/dir2/e2", 1146 isEmptyDirectory: true, 1147 }, 1148 { 1149 path: "wd/dir2/b/foo", 1150 isExecutable: true, 1151 contents: []byte("foo"), 1152 fileDigest: &fooDigest, 1153 }, 1154 { 1155 path: "wd/dir2/bar", 1156 contents: []byte("bar"), 1157 }, 1158 { 1159 path: "foo", 1160 contents: []byte("foo"), 1161 fileDigest: &fooDigest, 1162 }, 1163 { 1164 path: "wd/x/a", 1165 symlinkTarget: "../dir/a", 1166 }, 1167 { 1168 path: "wd/x/bar", 1169 symlinkTarget: "../dir/a/bar", 1170 }, 1171 } 1172 for _, out := range wantOutputs { 1173 path := filepath.Join(execRoot, out.path) 1174 fi, err := os.Lstat(path) 1175 if err != nil { 1176 t.Errorf("expected output %s is missing", path) 1177 } 1178 if out.fileDigest != nil { 1179 fmd := cache.Get(path) 1180 if fmd == nil { 1181 t.Errorf("cache does not contain metadata for path: %v", path) 1182 } else { 1183 if diff := cmp.Diff(*out.fileDigest, fmd.Digest); diff != "" { 1184 t.Errorf("invalid digeset in cache for path %v, (-want +got): %v", path, diff) 1185 } 1186 } 1187 } 1188 if out.symlinkTarget != "" { 1189 if fi.Mode()&os.ModeSymlink == 0 { 1190 t.Errorf("expected %s to be a symlink, got %v", path, fi.Mode()) 1191 } 1192 target, e := os.Readlink(path) 1193 if e != nil { 1194 t.Errorf("expected %s to be a symlink, got error reading symlink: %v", path, err) 1195 } 1196 if target != out.symlinkTarget { 1197 t.Errorf("expected %s to be a symlink to %s, got %s", path, out.symlinkTarget, target) 1198 } 1199 } else if out.isEmptyDirectory { 1200 if !fi.Mode().IsDir() { 1201 t.Errorf("expected %s to be a directory, got %s", path, fi.Mode()) 1202 } 1203 files, err := os.ReadDir(path) 1204 if err != nil { 1205 t.Errorf("expected %s to be a directory, got error reading directory: %v", path, err) 1206 } 1207 if len(files) != 0 { 1208 t.Errorf("expected %s to be an empty directory, got contents: %v", path, files) 1209 } 1210 } else { 1211 contents, err := os.ReadFile(path) 1212 if err != nil { 1213 t.Errorf("error reading from %s: %v", path, err) 1214 } 1215 if !bytes.Equal(contents, out.contents) { 1216 t.Errorf("expected %s to contain %v, got %v", path, out.contents, contents) 1217 } 1218 // TODO(olaola): verify the file is executable, if required. 1219 // Doing this naively failed go test in CI. 1220 } 1221 } 1222 } 1223 1224 func TestDownloadActionOutputs_TestFileModifiedTimestamp(t *testing.T) { 1225 t.Parallel() 1226 ctx := context.Background() 1227 e, cleanup := fakes.NewTestEnv(t) 1228 defer cleanup() 1229 fake := e.Server.CAS 1230 c := e.Client.GrpcClient 1231 cache := filemetadata.NewSingleFlightCache() 1232 1233 fooDigest := fake.Put([]byte("foo")) 1234 barDigest := fake.Put([]byte("bar")) 1235 emptyDigest := fake.Put([]byte("")) 1236 1237 // Expected tree structure: 1238 // root 1239 // --> a/ 1240 // --> b/ 1241 // --> foo 1242 // --> empty 1243 // --> bar 1244 // --> empty 1245 // --> b/ 1246 // --> foo 1247 // --> empty 1248 1249 ar := &repb.ActionResult{ 1250 OutputFiles: []*repb.OutputFile{ 1251 &repb.OutputFile{Path: "dir/a/b/foo", Digest: fooDigest.ToProto()}, 1252 &repb.OutputFile{Path: "dir/a/b/empty", Digest: emptyDigest.ToProto()}, 1253 &repb.OutputFile{Path: "dir/a/bar", Digest: barDigest.ToProto()}, 1254 &repb.OutputFile{Path: "dir/a/empty", Digest: emptyDigest.ToProto()}, 1255 &repb.OutputFile{Path: "dir/b/foo", Digest: fooDigest.ToProto()}, 1256 &repb.OutputFile{Path: "dir/b/empty", Digest: emptyDigest.ToProto()}, 1257 }, 1258 } 1259 execRoot := t.TempDir() 1260 wd := "wd" 1261 if err := os.Mkdir(filepath.Join(execRoot, wd), os.ModePerm); err != nil { 1262 t.Fatalf("failed to create working directory %v: %v", wd, err) 1263 } 1264 1265 // Pre-create some output files to make sure file modified timestamps get updated post 1266 // file download. 1267 outputFilesToPrecreate := []string{ 1268 filepath.Join(execRoot, "wd/dir/a/b/empty"), 1269 filepath.Join(execRoot, "wd/dir/a/empty"), 1270 } 1271 for _, p := range outputFilesToPrecreate { 1272 dir := filepath.Dir(p) 1273 if err := os.MkdirAll(dir, 0750); err != nil { 1274 t.Fatalf("Unable to precreate dirs for file %v: %v", dir, err) 1275 } 1276 f, err := os.Create(p) 1277 f.Close() 1278 if err != nil { 1279 t.Fatalf("Unable to precreate file %v: %v", p, err) 1280 } 1281 } 1282 wantMinModTime := time.Now().Local() 1283 time.Sleep(2 * time.Second) // system mtimes aren't super accurate - https://apenwarr.ca/log/20181113 1284 1285 _, err := c.DownloadActionOutputs(ctx, ar, filepath.Join(execRoot, wd), cache) 1286 if err != nil { 1287 t.Errorf("error in DownloadActionOutputs: %s", err) 1288 } 1289 wantOutputs := []struct { 1290 path string 1291 isExecutable bool 1292 contents []byte 1293 fileDigest *digest.Digest 1294 }{ 1295 { 1296 path: "wd/dir/a/b/foo", 1297 isExecutable: true, 1298 contents: []byte("foo"), 1299 fileDigest: &fooDigest, 1300 }, 1301 { 1302 path: "wd/dir/a/bar", 1303 contents: []byte("bar"), 1304 }, 1305 { 1306 path: "wd/dir/b/foo", 1307 isExecutable: true, 1308 contents: []byte("foo"), 1309 fileDigest: &fooDigest, 1310 }, 1311 { 1312 path: "wd/dir/a/b/empty", 1313 contents: []byte(""), 1314 }, 1315 { 1316 path: "wd/dir/a/empty", 1317 contents: []byte(""), 1318 }, 1319 { 1320 path: "wd/dir/b/empty", 1321 contents: []byte(""), 1322 }, 1323 } 1324 for _, out := range wantOutputs { 1325 path := filepath.Join(execRoot, out.path) 1326 fi, err := os.Lstat(path) 1327 if err != nil { 1328 t.Errorf("expected output %s is missing", path) 1329 } 1330 if fi.ModTime().Before(wantMinModTime) { 1331 t.Errorf("File %v has old timestamp, want >= %v, got %v", path, wantMinModTime, fi.ModTime()) 1332 } 1333 if out.fileDigest != nil { 1334 fmd := cache.Get(path) 1335 if fmd == nil { 1336 t.Errorf("cache does not contain metadata for path: %v", path) 1337 } else { 1338 if diff := cmp.Diff(*out.fileDigest, fmd.Digest); diff != "" { 1339 t.Errorf("invalid digeset in cache for path %v, (-want +got): %v", path, diff) 1340 } 1341 } 1342 } else { 1343 contents, err := os.ReadFile(path) 1344 if err != nil { 1345 t.Errorf("error reading from %s: %v", path, err) 1346 } 1347 if !bytes.Equal(contents, out.contents) { 1348 t.Errorf("expected %s to contain %v, got %v", path, out.contents, contents) 1349 } 1350 } 1351 } 1352 } 1353 1354 func TestDownloadDirectory(t *testing.T) { 1355 t.Parallel() 1356 ctx := context.Background() 1357 e, cleanup := fakes.NewTestEnv(t) 1358 defer cleanup() 1359 fake := e.Server.CAS 1360 c := e.Client.GrpcClient 1361 cache := filemetadata.NewSingleFlightCache() 1362 1363 fooDigest := fake.Put([]byte("foo")) 1364 dir := &repb.Directory{ 1365 Files: []*repb.FileNode{ 1366 {Name: "foo", Digest: fooDigest.ToProto(), IsExecutable: true}, 1367 }, 1368 Directories: []*repb.DirectoryNode{ 1369 {Name: "empty", Digest: digest.Empty.ToProto()}, 1370 }, 1371 } 1372 dirBlob, err := proto.Marshal(dir) 1373 if err != nil { 1374 t.Fatalf("failed marshalling Tree: %s", err) 1375 } 1376 fake.Put(dirBlob) 1377 1378 d := digest.TestNewFromMessage(dir) 1379 execRoot := t.TempDir() 1380 1381 outputs, _, err := c.DownloadDirectory(ctx, d, execRoot, cache) 1382 if err != nil { 1383 t.Errorf("error in DownloadActionOutputs: %s", err) 1384 } 1385 1386 if diff := cmp.Diff(outputs, map[string]*client.TreeOutput{ 1387 "empty": { 1388 Digest: digest.Empty, 1389 Path: "empty", 1390 IsEmptyDirectory: true, 1391 }, 1392 "foo": { 1393 Digest: fooDigest, 1394 Path: "foo", 1395 IsExecutable: true, 1396 }}); diff != "" { 1397 t.Fatalf("DownloadDirectory() mismatch (-want +got):\n%s", diff) 1398 } 1399 1400 b, err := os.ReadFile(filepath.Join(execRoot, "foo")) 1401 if err != nil { 1402 t.Fatalf("failed to read foo: %s", err) 1403 } 1404 if want, got := []byte("foo"), b; !bytes.Equal(want, got) { 1405 t.Errorf("want %s, got %s", want, got) 1406 } 1407 } 1408 1409 func TestDownloadActionOutputsErrors(t *testing.T) { 1410 ar := &repb.ActionResult{} 1411 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: "foo", Digest: digest.NewFromBlob([]byte("foo")).ToProto()}) 1412 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: "bar", Digest: digest.NewFromBlob([]byte("bar")).ToProto()}) 1413 execRoot := t.TempDir() 1414 1415 for _, ub := range []client.UseBatchOps{false, true} { 1416 ub := ub 1417 t.Run(fmt.Sprintf("%sUsingBatch:%t", t.Name(), ub), func(t *testing.T) { 1418 t.Parallel() 1419 ctx := context.Background() 1420 e, cleanup := fakes.NewTestEnv(t) 1421 defer cleanup() 1422 c := e.Client.GrpcClient 1423 ub.Apply(c) 1424 1425 _, err := c.DownloadActionOutputs(ctx, ar, execRoot, filemetadata.NewSingleFlightCache()) 1426 if status.Code(err) != codes.NotFound && !strings.Contains(err.Error(), "not found") { 1427 t.Errorf("expected 'not found' error in DownloadActionOutputs, got: %v", err) 1428 } 1429 }) 1430 } 1431 } 1432 1433 func TestDownloadActionOutputsBatching(t *testing.T) { 1434 tests := []struct { 1435 name string 1436 sizes []int 1437 locality bool 1438 batchReqs int 1439 }{ 1440 { 1441 name: "single small blob", 1442 sizes: []int{1}, 1443 batchReqs: 0, 1444 }, 1445 { 1446 name: "large and small blobs hitting max exactly", 1447 sizes: []int{338, 338, 338, 1, 1, 1}, 1448 batchReqs: 3, 1449 }, 1450 { 1451 name: "small batches of big blobs", 1452 sizes: []int{88, 88, 88, 88, 88, 88, 88}, 1453 batchReqs: 2, 1454 }, 1455 { 1456 name: "batch with blob that's too big", 1457 sizes: []int{400, 88, 88, 88}, 1458 batchReqs: 1, 1459 }, 1460 { 1461 name: "many small blobs hitting max digests", 1462 sizes: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 1463 batchReqs: 4, 1464 }, 1465 { 1466 name: "single small blob locality", 1467 sizes: []int{1}, 1468 locality: true, 1469 batchReqs: 0, 1470 }, 1471 { 1472 name: "large and small blobs hitting max exactly locality", 1473 sizes: []int{338, 338, 338, 1, 1, 1}, 1474 locality: true, 1475 batchReqs: 2, 1476 }, 1477 { 1478 name: "small batches of big blobs locality", 1479 sizes: []int{88, 88, 88, 88, 88, 88, 88}, 1480 locality: true, 1481 batchReqs: 2, 1482 }, 1483 { 1484 name: "batch with blob that's too big locality", 1485 sizes: []int{400, 88, 88, 88}, 1486 locality: true, 1487 batchReqs: 1, 1488 }, 1489 { 1490 name: "many small blobs hitting max digests locality", 1491 sizes: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 1492 locality: true, 1493 batchReqs: 4, 1494 }, 1495 } 1496 1497 for _, tc := range tests { 1498 tc := tc 1499 t.Run(tc.name, func(t *testing.T) { 1500 t.Parallel() 1501 ctx := context.Background() 1502 e, cleanup := fakes.NewTestEnv(t) 1503 defer cleanup() 1504 fake := e.Server.CAS 1505 c := e.Client.GrpcClient 1506 c.MaxBatchSize = 500 1507 c.MaxBatchDigests = 4 1508 // Each batch request frame overhead is 13 bytes. 1509 // A per-blob overhead is 74 bytes. 1510 1511 c.UtilizeLocality = client.UtilizeLocality(tc.locality) 1512 var dgs []digest.Digest 1513 blobs := make(map[digest.Digest][]byte) 1514 ar := &repb.ActionResult{} 1515 for i, sz := range tc.sizes { 1516 blob := make([]byte, int(sz)) 1517 if sz > 0 { 1518 blob[0] = byte(i) // Ensure blobs are distinct 1519 } 1520 dg := digest.NewFromBlob(blob) 1521 blobs[dg] = blob 1522 dgs = append(dgs, dg) 1523 if sz > 0 { 1524 // Don't seed fake with empty blob, because it should not be called. 1525 fake.Put(blob) 1526 } 1527 name := fmt.Sprintf("foo_%s", dg) 1528 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name, Digest: dg.ToProto()}) 1529 } 1530 execRoot := t.TempDir() 1531 _, err := c.DownloadActionOutputs(ctx, ar, execRoot, filemetadata.NewSingleFlightCache()) 1532 if err != nil { 1533 t.Errorf("error in DownloadActionOutputs: %s", err) 1534 } 1535 for dg, data := range blobs { 1536 path := filepath.Join(execRoot, fmt.Sprintf("foo_%s", dg)) 1537 contents, err := os.ReadFile(path) 1538 if err != nil { 1539 t.Errorf("error reading from %s: %v", path, err) 1540 } 1541 if !bytes.Equal(contents, data) { 1542 t.Errorf("expected %s to contain %v, got %v", path, contents, data) 1543 } 1544 } 1545 if fake.BatchReqs() != tc.batchReqs { 1546 t.Errorf("%d requests were made to BatchReadBlobs, wanted %d", fake.BatchReqs(), tc.batchReqs) 1547 } 1548 }) 1549 } 1550 } 1551 1552 func TestDownloadActionOutputsConcurrency(t *testing.T) { 1553 t.Parallel() 1554 ctx := context.Background() 1555 type testBlob struct { 1556 digest digest.Digest 1557 blob []byte 1558 } 1559 blobs := make([]*testBlob, 1000) 1560 for i := 0; i < 1000; i++ { 1561 blob := []byte(fmt.Sprint(i)) 1562 blobs[i] = &testBlob{ 1563 digest: digest.NewFromBlob(blob), 1564 blob: blob, 1565 } 1566 } 1567 1568 for _, ub := range []client.UseBatchOps{false, true} { 1569 for _, uo := range []client.UnifiedDownloads{false, true} { 1570 ub, uo := ub, uo 1571 t.Run(fmt.Sprintf("%sUsingBatch:%t,UnifiedDownloads:%t", t.Name(), ub, uo), func(t *testing.T) { 1572 t.Parallel() 1573 e, cleanup := fakes.NewTestEnv(t) 1574 defer cleanup() 1575 fake := e.Server.CAS 1576 fake.ReqSleepDuration = reqMaxSleepDuration 1577 fake.ReqSleepRandomize = true 1578 c := e.Client.GrpcClient 1579 client.CASConcurrency(defaultCASConcurrency).Apply(c) 1580 client.MaxBatchDigests(300).Apply(c) 1581 ub.Apply(c) 1582 uo.Apply(c) 1583 for _, b := range blobs { 1584 fake.Put(b.blob) 1585 } 1586 c.RunBackgroundTasks(ctx) 1587 1588 eg, eCtx := errgroup.WithContext(ctx) 1589 for i := 0; i < 100; i++ { 1590 i := i 1591 eg.Go(func() error { 1592 var input []*testBlob 1593 ar := &repb.ActionResult{} 1594 // Download 15 digests in a sliding window. 1595 for j := i * 10; j < i*10+15 && j < len(blobs); j++ { 1596 input = append(input, blobs[j]) 1597 } 1598 for _, i := range input { 1599 name := fmt.Sprintf("foo_%s", i.digest) 1600 dgPb := i.digest.ToProto() 1601 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name, Digest: dgPb}) 1602 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name + "_copy", Digest: dgPb}) 1603 } 1604 1605 execRoot := t.TempDir() 1606 if _, err := c.DownloadActionOutputs(eCtx, ar, execRoot, filemetadata.NewSingleFlightCache()); err != nil { 1607 return fmt.Errorf("error in DownloadActionOutputs: %s", err) 1608 } 1609 for _, i := range input { 1610 name := filepath.Join(execRoot, fmt.Sprintf("foo_%s", i.digest)) 1611 for _, path := range []string{name, name + "_copy"} { 1612 contents, err := os.ReadFile(path) 1613 if err != nil { 1614 return fmt.Errorf("error reading from %s: %v", path, err) 1615 } 1616 if !bytes.Equal(contents, i.blob) { 1617 return fmt.Errorf("expected %s to contain %v, got %v", path, contents, i.blob) 1618 } 1619 } 1620 } 1621 return nil 1622 }) 1623 } 1624 if err := eg.Wait(); err != nil { 1625 t.Error(err) 1626 } 1627 if fake.MaxConcurrency() > defaultCASConcurrency { 1628 t.Errorf("CAS concurrency %v was higher than max %v", fake.MaxConcurrency(), defaultCASConcurrency) 1629 } 1630 if ub { 1631 if uo { 1632 // Check that we batch requests from different Actions. 1633 if fake.BatchReqs() > 50 { 1634 t.Errorf("%d requests were made to BatchReadBlobs, wanted <= 50", fake.BatchReqs()) 1635 } 1636 } else { 1637 if fake.BatchReqs() != 100 { 1638 t.Errorf("%d requests were made to BatchReadBlobs, wanted 100", fake.BatchReqs()) 1639 } 1640 } 1641 } 1642 }) 1643 } 1644 } 1645 } 1646 1647 func TestDownloadActionOutputsOneSlowRead(t *testing.T) { 1648 t.Parallel() 1649 ctx := context.Background() 1650 type testBlob struct { 1651 digest digest.Digest 1652 blob []byte 1653 } 1654 blobs := make([]*testBlob, 20) 1655 for i := 0; i < len(blobs); i++ { 1656 blob := []byte(fmt.Sprint(i)) 1657 blobs[i] = &testBlob{ 1658 digest: digest.NewFromBlob(blob), 1659 blob: blob, 1660 } 1661 } 1662 problemBlob := make([]byte, 2000) // Will not be batched. 1663 problemDg := digest.NewFromBlob(problemBlob) 1664 1665 e, cleanup := fakes.NewTestEnv(t) 1666 defer cleanup() 1667 fake := e.Server.CAS 1668 fake.ReqSleepDuration = reqMaxSleepDuration 1669 fake.ReqSleepRandomize = true 1670 wait := make(chan bool) 1671 fake.PerDigestBlockFn[problemDg] = func() { 1672 <-wait 1673 } 1674 c := e.Client.GrpcClient 1675 client.MaxBatchSize(1000).Apply(c) 1676 for _, b := range blobs { 1677 fake.Put(b.blob) 1678 } 1679 fake.Put(problemBlob) 1680 1681 // Start downloading the problem digest. 1682 pg, pCtx := errgroup.WithContext(ctx) 1683 pg.Go(func() error { 1684 name := fmt.Sprintf("problem_%s", problemDg) 1685 dgPb := problemDg.ToProto() 1686 ar := &repb.ActionResult{} 1687 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name, Digest: dgPb}) 1688 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name + "_copy", Digest: dgPb}) 1689 1690 execRoot := t.TempDir() 1691 if _, err := c.DownloadActionOutputs(pCtx, ar, execRoot, filemetadata.NewSingleFlightCache()); err != nil { 1692 return fmt.Errorf("error in DownloadActionOutputs: %s", err) 1693 } 1694 for _, path := range []string{name, name + "_copy"} { 1695 contents, err := os.ReadFile(filepath.Join(execRoot, path)) 1696 if err != nil { 1697 return fmt.Errorf("error reading from %s: %v", path, err) 1698 } 1699 if !bytes.Equal(contents, problemBlob) { 1700 return fmt.Errorf("expected %s to contain %v, got %v", path, problemBlob, contents) 1701 } 1702 } 1703 return nil 1704 }) 1705 // Download a bunch of fast-downloading blobs. 1706 eg, eCtx := errgroup.WithContext(ctx) 1707 for i := 0; i < 100; i++ { 1708 i := i 1709 eg.Go(func() error { 1710 var input []*testBlob 1711 ar := &repb.ActionResult{} 1712 // Download 15 digests in a sliding window. 1713 for j := i * 10; j < i*10+15 && j < len(blobs); j++ { 1714 input = append(input, blobs[j]) 1715 } 1716 totalBytes := int64(0) 1717 for _, i := range input { 1718 name := fmt.Sprintf("foo_%s", i.digest) 1719 dgPb := i.digest.ToProto() 1720 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name, Digest: dgPb}) 1721 ar.OutputFiles = append(ar.OutputFiles, &repb.OutputFile{Path: name + "_copy", Digest: dgPb}) 1722 // Count only once due to dedup 1723 totalBytes += i.digest.Size 1724 } 1725 1726 execRoot := t.TempDir() 1727 stats, err := c.DownloadActionOutputs(eCtx, ar, execRoot, filemetadata.NewSingleFlightCache()) 1728 if err != nil { 1729 return fmt.Errorf("error in DownloadActionOutputs: %s", err) 1730 } 1731 if stats.LogicalMoved != stats.RealMoved { 1732 t.Errorf("c.DownloadActionOutputs: logical (%v) and real (%v) bytes moved different despite compression off", stats.LogicalMoved, stats.RealMoved) 1733 } 1734 if stats.LogicalMoved != totalBytes { 1735 t.Errorf("c.DownloadActionOutputs: logical (%v) bytes moved different from sum of digests (%v) despite downloaded", stats.LogicalMoved, stats.RealMoved) 1736 } 1737 for _, i := range input { 1738 name := filepath.Join(execRoot, fmt.Sprintf("foo_%s", i.digest)) 1739 for _, path := range []string{name, name + "_copy"} { 1740 contents, err := os.ReadFile(path) 1741 if err != nil { 1742 return fmt.Errorf("error reading from %s: %v", path, err) 1743 } 1744 if !bytes.Equal(contents, i.blob) { 1745 return fmt.Errorf("expected %s to contain %v, got %v", path, i.blob, contents) 1746 } 1747 } 1748 } 1749 return nil 1750 }) 1751 } 1752 if err := eg.Wait(); err != nil { 1753 t.Error(err) 1754 } 1755 // Now let the problem digest download finish. 1756 close(wait) 1757 if err := pg.Wait(); err != nil { 1758 t.Error(err) 1759 } 1760 } 1761 1762 func TestWriteAndReadProto(t *testing.T) { 1763 t.Parallel() 1764 ctx := context.Background() 1765 e, cleanup := fakes.NewTestEnv(t) 1766 defer cleanup() 1767 fake := e.Server.CAS 1768 c := e.Client.GrpcClient 1769 1770 fooDigest := fake.Put([]byte("foo")) 1771 dirA := &repb.Directory{ 1772 Files: []*repb.FileNode{ 1773 {Name: "foo", Digest: fooDigest.ToProto(), IsExecutable: true}, 1774 }, 1775 } 1776 d, err := c.WriteProto(ctx, dirA) 1777 if err != nil { 1778 t.Errorf("Failed writing proto: %s", err) 1779 } 1780 1781 dirB := &repb.Directory{} 1782 if _, err := c.ReadProto(ctx, d, dirB); err != nil { 1783 t.Errorf("Failed reading proto: %s", err) 1784 } 1785 if !proto.Equal(dirA, dirB) { 1786 t.Errorf("Protos not equal: %s / %s", dirA, dirB) 1787 } 1788 } 1789 1790 func TestDownloadFiles(t *testing.T) { 1791 t.Parallel() 1792 ctx := context.Background() 1793 e, cleanup := fakes.NewTestEnv(t) 1794 defer cleanup() 1795 fake := e.Server.CAS 1796 c := e.Client.GrpcClient 1797 1798 fooDigest := fake.Put([]byte("foo")) 1799 barDigest := fake.Put([]byte("bar")) 1800 1801 execRoot := t.TempDir() 1802 stats, err := c.DownloadFiles(ctx, execRoot, map[digest.Digest]*client.TreeOutput{ 1803 fooDigest: {Digest: fooDigest, Path: "foo", IsExecutable: true}, 1804 barDigest: {Digest: barDigest, Path: "bar"}, 1805 }) 1806 if err != nil { 1807 t.Errorf("Failed to run DownloadFiles: %v", err) 1808 } 1809 if stats.LogicalMoved != stats.RealMoved { 1810 t.Errorf("c.DownloadFiles: logical (%v) and real (%v) bytes moved different despite compression off", stats.LogicalMoved, stats.RealMoved) 1811 } 1812 if stats.LogicalMoved != fooDg.Size+barDigest.Size { 1813 t.Errorf("c.DownloadFiles: logical (%v) bytes moved different from sum of digests (%v) despite no duplication", stats.LogicalMoved, fooDg.Size+barDigest.Size) 1814 } 1815 1816 if b, err := os.ReadFile(filepath.Join(execRoot, "foo")); err != nil { 1817 t.Errorf("failed to read file: %v", err) 1818 } else if diff := cmp.Diff(b, []byte("foo")); diff != "" { 1819 t.Errorf("foo mismatch (-want +got):\n%s", diff) 1820 } 1821 1822 if b, err := os.ReadFile(filepath.Join(execRoot, "bar")); err != nil { 1823 t.Errorf("failed to read file: %v", err) 1824 } else if diff := cmp.Diff(b, []byte("bar")); diff != "" { 1825 t.Errorf("foo mismatch (-want +got):\n%s", diff) 1826 } 1827 } 1828 1829 func TestDownloadFilesCancel(t *testing.T) { 1830 t.Parallel() 1831 for _, uo := range []client.UnifiedDownloads{false, true} { 1832 uo := uo 1833 t.Run(fmt.Sprintf("UnifiedDownloads:%t", uo), func(t *testing.T) { 1834 t.Parallel() 1835 execRoot := t.TempDir() 1836 ctx := context.Background() 1837 e, cleanup := fakes.NewTestEnv(t) 1838 defer cleanup() 1839 fake := e.Server.CAS 1840 fooDigest := fake.Put([]byte{1, 2, 3}) 1841 wait := make(chan bool) 1842 fake.PerDigestBlockFn[fooDigest] = func() { 1843 <-wait 1844 } 1845 c := e.Client.GrpcClient 1846 uo.Apply(c) 1847 eg, eCtx := errgroup.WithContext(ctx) 1848 cCtx, cancel := context.WithCancel(eCtx) 1849 eg.Go(func() error { 1850 if _, err := c.DownloadFiles(cCtx, execRoot, map[digest.Digest]*client.TreeOutput{ 1851 fooDigest: {Digest: fooDigest, Path: "foo", IsExecutable: true}, 1852 }); err != context.Canceled { 1853 return fmt.Errorf("Failed to run DownloadFiles: expected context.Canceled, got %v", err) 1854 } 1855 return nil 1856 }) 1857 eg.Go(func() error { 1858 cancel() 1859 return nil 1860 }) 1861 if err := eg.Wait(); err != nil { 1862 t.Error(err) 1863 } 1864 if fake.BlobReads(fooDigest) != 0 { 1865 t.Errorf("Expected no reads for foo since request is cancelled, got %v.", fake.BlobReads(fooDigest)) 1866 } 1867 close(wait) 1868 }) 1869 } 1870 } 1871 1872 func TestBatchDownloadBlobsCompressed(t *testing.T) { 1873 t.Parallel() 1874 ctx := context.Background() 1875 listener, err := net.Listen("tcp", ":0") 1876 if err != nil { 1877 t.Fatalf("Cannot listen: %v", err) 1878 } 1879 fakeCAS := fakes.NewCAS() 1880 defer listener.Close() 1881 server := grpc.NewServer() 1882 regrpc.RegisterContentAddressableStorageServer(server, fakeCAS) 1883 go server.Serve(listener) 1884 defer server.Stop() 1885 c, err := client.NewClient(ctx, instance, client.DialParams{ 1886 Service: listener.Addr().String(), 1887 NoSecurity: true, 1888 }, client.StartupCapabilities(false)) 1889 if err != nil { 1890 t.Fatalf("Error connecting to server: %v", err) 1891 } 1892 defer c.Close() 1893 1894 fooDigest := fakeCAS.Put([]byte("foo")) 1895 barDigest := fakeCAS.Put([]byte("bar")) 1896 digests := []digest.Digest{fooDigest, barDigest} 1897 client.UseBatchCompression(true).Apply(c) 1898 1899 wantBlobs := map[digest.Digest]client.CompressedBlobInfo{ 1900 fooDigest: client.CompressedBlobInfo{ 1901 CompressedSize: 16, 1902 Data: []byte("foo"), 1903 }, 1904 barDigest: client.CompressedBlobInfo{ 1905 CompressedSize: 16, 1906 Data: []byte("bar"), 1907 }, 1908 } 1909 gotBlobs, err := c.BatchDownloadBlobsWithStats(ctx, digests) 1910 if err != nil { 1911 t.Errorf("client.BatchDownloadBlobs(ctx, digests) failed: %v", err) 1912 } 1913 if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" { 1914 t.Errorf("client.BatchDownloadBlobs(ctx, digests) had diff (want -> got):\n%s", diff) 1915 } 1916 }