github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/archive/zip/zip_test.go (about) 1 // Copyright 2011 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Tests that involve both reading and writing. 6 7 package zip 8 9 import ( 10 "bytes" 11 "errors" 12 "fmt" 13 "hash" 14 "internal/testenv" 15 "io" 16 "runtime" 17 "sort" 18 "strings" 19 "testing" 20 "time" 21 ) 22 23 func TestOver65kFiles(t *testing.T) { 24 if testing.Short() && testenv.Builder() == "" { 25 t.Skip("skipping in short mode") 26 } 27 buf := new(strings.Builder) 28 w := NewWriter(buf) 29 const nFiles = (1 << 16) + 42 30 for i := 0; i < nFiles; i++ { 31 _, err := w.CreateHeader(&FileHeader{ 32 Name: fmt.Sprintf("%d.dat", i), 33 Method: Store, // Deflate is too slow when it is compiled with -race flag 34 }) 35 if err != nil { 36 t.Fatalf("creating file %d: %v", i, err) 37 } 38 } 39 if err := w.Close(); err != nil { 40 t.Fatalf("Writer.Close: %v", err) 41 } 42 s := buf.String() 43 zr, err := NewReader(strings.NewReader(s), int64(len(s))) 44 if err != nil { 45 t.Fatalf("NewReader: %v", err) 46 } 47 if got := len(zr.File); got != nFiles { 48 t.Fatalf("File contains %d files, want %d", got, nFiles) 49 } 50 for i := 0; i < nFiles; i++ { 51 want := fmt.Sprintf("%d.dat", i) 52 if zr.File[i].Name != want { 53 t.Fatalf("File(%d) = %q, want %q", i, zr.File[i].Name, want) 54 } 55 } 56 } 57 58 func TestModTime(t *testing.T) { 59 var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC) 60 fh := new(FileHeader) 61 fh.SetModTime(testTime) 62 outTime := fh.ModTime() 63 if !outTime.Equal(testTime) { 64 t.Errorf("times don't match: got %s, want %s", outTime, testTime) 65 } 66 } 67 68 func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) { 69 fi := fh.FileInfo() 70 fh2, err := FileInfoHeader(fi) 71 if err != nil { 72 t.Fatal(err) 
73 } 74 if got, want := fh2.Name, fh.Name; got != want { 75 t.Errorf("Name: got %s, want %s\n", got, want) 76 } 77 if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want { 78 t.Errorf("UncompressedSize: got %d, want %d\n", got, want) 79 } 80 if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want { 81 t.Errorf("UncompressedSize64: got %d, want %d\n", got, want) 82 } 83 if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want { 84 t.Errorf("ModifiedTime: got %d, want %d\n", got, want) 85 } 86 if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want { 87 t.Errorf("ModifiedDate: got %d, want %d\n", got, want) 88 } 89 90 if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh { 91 t.Errorf("Sys didn't return original *FileHeader") 92 } 93 } 94 95 func TestFileHeaderRoundTrip(t *testing.T) { 96 fh := &FileHeader{ 97 Name: "foo.txt", 98 UncompressedSize: 987654321, 99 ModifiedTime: 1234, 100 ModifiedDate: 5678, 101 } 102 testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t) 103 } 104 105 func TestFileHeaderRoundTrip64(t *testing.T) { 106 fh := &FileHeader{ 107 Name: "foo.txt", 108 UncompressedSize64: 9876543210, 109 ModifiedTime: 1234, 110 ModifiedDate: 5678, 111 } 112 testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t) 113 } 114 115 func TestFileHeaderRoundTripModified(t *testing.T) { 116 fh := &FileHeader{ 117 Name: "foo.txt", 118 UncompressedSize: 987654321, 119 Modified: time.Now().Local(), 120 ModifiedTime: 1234, 121 ModifiedDate: 5678, 122 } 123 fi := fh.FileInfo() 124 fh2, err := FileInfoHeader(fi) 125 if err != nil { 126 t.Fatal(err) 127 } 128 if got, want := fh2.Modified, fh.Modified.UTC(); got != want { 129 t.Errorf("Modified: got %s, want %s\n", got, want) 130 } 131 if got, want := fi.ModTime(), fh.Modified.UTC(); got != want { 132 t.Errorf("Modified: got %s, want %s\n", got, want) 133 } 134 } 135 136 func TestFileHeaderRoundTripWithoutModified(t *testing.T) { 137 fh := 
&FileHeader{ 138 Name: "foo.txt", 139 UncompressedSize: 987654321, 140 ModifiedTime: 1234, 141 ModifiedDate: 5678, 142 } 143 fi := fh.FileInfo() 144 fh2, err := FileInfoHeader(fi) 145 if err != nil { 146 t.Fatal(err) 147 } 148 if got, want := fh2.ModTime(), fh.ModTime(); got != want { 149 t.Errorf("Modified: got %s, want %s\n", got, want) 150 } 151 if got, want := fi.ModTime(), fh.ModTime(); got != want { 152 t.Errorf("Modified: got %s, want %s\n", got, want) 153 } 154 } 155 156 type repeatedByte struct { 157 off int64 158 b byte 159 n int64 160 } 161 162 // rleBuffer is a run-length-encoded byte buffer. 163 // It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt, 164 // allowing random-access reads. 165 type rleBuffer struct { 166 buf []repeatedByte 167 } 168 169 func (r *rleBuffer) Size() int64 { 170 if len(r.buf) == 0 { 171 return 0 172 } 173 last := &r.buf[len(r.buf)-1] 174 return last.off + last.n 175 } 176 177 func (r *rleBuffer) Write(p []byte) (n int, err error) { 178 var rp *repeatedByte 179 if len(r.buf) > 0 { 180 rp = &r.buf[len(r.buf)-1] 181 // Fast path, if p is entirely the same byte repeated. 182 if lastByte := rp.b; len(p) > 0 && p[0] == lastByte { 183 if bytes.Count(p, []byte{lastByte}) == len(p) { 184 rp.n += int64(len(p)) 185 return len(p), nil 186 } 187 } 188 } 189 190 for _, b := range p { 191 if rp == nil || rp.b != b { 192 r.buf = append(r.buf, repeatedByte{r.Size(), b, 1}) 193 rp = &r.buf[len(r.buf)-1] 194 } else { 195 rp.n++ 196 } 197 } 198 return len(p), nil 199 } 200 201 func memset(a []byte, b byte) { 202 if len(a) == 0 { 203 return 204 } 205 // Double, until we reach power of 2 >= len(a), same as bytes.Repeat, 206 // but without allocation. 
207 a[0] = b 208 for i, l := 1, len(a); i < l; i *= 2 { 209 copy(a[i:], a[:i]) 210 } 211 } 212 213 func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) { 214 if len(p) == 0 { 215 return 216 } 217 skipParts := sort.Search(len(r.buf), func(i int) bool { 218 part := &r.buf[i] 219 return part.off+part.n > off 220 }) 221 parts := r.buf[skipParts:] 222 if len(parts) > 0 { 223 skipBytes := off - parts[0].off 224 for _, part := range parts { 225 repeat := int(min(part.n-skipBytes, int64(len(p)-n))) 226 memset(p[n:n+repeat], part.b) 227 n += repeat 228 if n == len(p) { 229 return 230 } 231 skipBytes = 0 232 } 233 } 234 if n != len(p) { 235 err = io.ErrUnexpectedEOF 236 } 237 return 238 } 239 240 // Just testing the rleBuffer used in the Zip64 test above. Not used by the zip code. 241 func TestRLEBuffer(t *testing.T) { 242 b := new(rleBuffer) 243 var all []byte 244 writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"} 245 for _, w := range writes { 246 b.Write([]byte(w)) 247 all = append(all, w...) 248 } 249 if len(b.buf) != 10 { 250 t.Fatalf("len(b.buf) = %d; want 10", len(b.buf)) 251 } 252 253 for i := 0; i < len(all); i++ { 254 for j := 0; j < len(all)-i; j++ { 255 buf := make([]byte, j) 256 n, err := b.ReadAt(buf, int64(i)) 257 if err != nil || n != len(buf) { 258 t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf)) 259 } 260 if !bytes.Equal(buf, all[i:i+j]) { 261 t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j]) 262 } 263 } 264 } 265 } 266 267 // fakeHash32 is a dummy Hash32 that always returns 0. 
type fakeHash32 struct {
	hash.Hash32
}

// Write accepts and discards its input, so hashing huge test payloads is free.
func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }

// Sum32 always reports 0; tests using fakeHash32 also stub the reader-side
// checksum so the zero CRC is never rejected.
func (fakeHash32) Sum32() uint32 { return 0 }

// TestZip64 writes and re-reads an archive whose single entry is just over
// 4 GiB, forcing the zip64 format.
func TestZip64(t *testing.T) {
	if testing.Short() {
		t.Skip("slow test; skipping")
	}
	t.Parallel()
	const size = 1 << 32 // before the "END\n" part
	buf := testZip64(t, size)
	testZip64DirectoryRecordLength(buf, t)
}

// TestZip64EdgeCase exercises an uncompressed size of exactly 0xFFFFFFFF,
// the sentinel that mandates the 64-bit size field.
func TestZip64EdgeCase(t *testing.T) {
	if testing.Short() {
		t.Skip("slow test; skipping")
	}
	t.Parallel()
	// Test a zip file with uncompressed size 0xFFFFFFFF.
	// That's the magic marker for a 64-bit file, so even though
	// it fits in a 32-bit field we must use the 64-bit field.
	// Go 1.5 and earlier got this wrong,
	// writing an invalid zip file.
	const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part
	buf := testZip64(t, size)
	testZip64DirectoryRecordLength(buf, t)
}

// Tests that we generate a zip64 file if the directory is at offset
// 0xFFFFFFFF, but not before.
func TestZip64DirectoryOffset(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	t.Parallel()
	const filename = "huge.txt"
	// gen returns a writer-driver that pads a single stored file so the
	// central directory starts exactly at offset wantOff, verified via the
	// testHookCloseSizeOffset hook.
	gen := func(wantOff uint64) func(*Writer) {
		return func(w *Writer) {
			w.testHookCloseSizeOffset = func(size, off uint64) {
				if off != wantOff {
					t.Errorf("central directory offset = %d (%x); want %d", off, off, wantOff)
				}
			}
			f, err := w.CreateHeader(&FileHeader{
				Name:   filename,
				Method: Store,
			})
			if err != nil {
				t.Fatal(err)
			}
			// Disable real CRC computation; the payload is all zeros anyway.
			f.(*fileWriter).crc32 = fakeHash32{}
			// Size the payload so header + name + data + descriptor lands the
			// directory at wantOff exactly.
			size := wantOff - fileHeaderLen - uint64(len(filename)) - dataDescriptorLen
			if _, err := io.CopyN(f, zeros{}, int64(size)); err != nil {
				t.Fatal(err)
			}
			if err := w.Close(); err != nil {
				t.Fatal(err)
			}
		}
	}
	t.Run("uint32max-2_NoZip64", func(t *testing.T) {
		t.Parallel()
		if generatesZip64(t, gen(0xfffffffe)) {
			t.Error("unexpected zip64")
		}
	})
	t.Run("uint32max-1_Zip64", func(t *testing.T) {
		t.Parallel()
		if !generatesZip64(t, gen(0xffffffff)) {
			t.Error("expected zip64")
		}
	})
}

// At 64k (uint16max) records, we need to generate a zip64 file.
func TestZip64ManyRecords(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	t.Parallel()
	// gen returns a writer-driver that creates numRec empty stored entries.
	gen := func(numRec int) func(*Writer) {
		return func(w *Writer) {
			for i := 0; i < numRec; i++ {
				_, err := w.CreateHeader(&FileHeader{
					Name:   "a.txt",
					Method: Store,
				})
				if err != nil {
					t.Fatal(err)
				}
			}
			if err := w.Close(); err != nil {
				t.Fatal(err)
			}
		}
	}
	// 64k-1 records shouldn't make a zip64:
	t.Run("uint16max-1_NoZip64", func(t *testing.T) {
		t.Parallel()
		if generatesZip64(t, gen(0xfffe)) {
			t.Error("unexpected zip64")
		}
	})
	// 64k records should make a zip64:
	t.Run("uint16max_Zip64", func(t *testing.T) {
		t.Parallel()
		if !generatesZip64(t, gen(0xffff)) {
			t.Error("expected zip64")
		}
	})
}

// suffixSaver is an io.Writer & io.ReaderAt that remembers the last 0
// to 'keep' bytes of data written to it. Call Suffix to get the
// suffix bytes.
type suffixSaver struct {
	keep  int    // maximum number of trailing bytes to retain
	buf   []byte // ring storage; grows until len == keep, then wraps
	start int    // index in buf of the oldest retained byte
	size  int64  // total bytes ever written
}

// Size reports the total number of bytes written so far.
func (ss *suffixSaver) Size() int64 { return ss.size }

// errDiscardedBytes reports a ReadAt aimed at data older than the kept suffix.
var errDiscardedBytes = errors.New("ReadAt of discarded bytes")

// ReadAt serves reads that start within the retained suffix window and
// reports errDiscardedBytes for anything older. A read reaching past the
// end of the stream yields io.EOF.
func (ss *suffixSaver) ReadAt(p []byte, off int64) (int, error) {
	distance := ss.size - off
	if distance > int64(ss.keep) {
		return 0, errDiscardedBytes
	}
	tail := ss.Suffix()
	copied := copy(p, tail[len(tail)-int(distance):])
	if copied != len(p) {
		return copied, io.EOF
	}
	return copied, nil
}

// Suffix returns the retained trailing bytes in stream order, oldest first.
func (ss *suffixSaver) Suffix() []byte {
	if len(ss.buf) < ss.keep {
		// The ring never filled, so buf already holds everything in order.
		return ss.buf
	}
	out := make([]byte, ss.keep)
	head := copy(out, ss.buf[ss.start:])
	copy(out[head:], ss.buf[:])
	return out
}

// Write folds p into the saved suffix, discarding bytes that age out of the
// keep window. It always succeeds.
func (ss *suffixSaver) Write(p []byte) (int, error) {
	written := len(p)
	ss.size += int64(written)
	// Phase 1: fill the ring up to capacity, in order.
	if room := ss.keep - len(ss.buf); room > 0 {
		take := room
		if take > len(p) {
			take = len(p)
		}
		ss.buf = append(ss.buf, p[:take]...)
		p = p[take:]
	}
	// Phase 2: overwrite in place, wrapping the cursor at keep.
	for len(p) > 0 {
		w := copy(ss.buf[ss.start:], p)
		p = p[w:]
		ss.start += w
		if ss.start == ss.keep {
			ss.start = 0
		}
	}
	return written, nil
}

// generatesZip64 reports whether f wrote a zip64 file.
// f is also responsible for closing w.
func generatesZip64(t *testing.T, f func(w *Writer)) bool {
	// 10 MiB of retained suffix is plenty to hold the central directory
	// plus the end-of-central-directory records that suffixIsZip64 inspects.
	ss := &suffixSaver{keep: 10 << 20}
	w := NewWriter(ss)
	f(w)
	return suffixIsZip64(t, ss)
}

// sizedReaderAt is the minimal read-only view of a finished archive:
// random access plus total size.
type sizedReaderAt interface {
	io.ReaderAt
	Size() int64
}

// suffixIsZip64 reports whether the archive in zip ends with a valid
// zip64 end-of-central-directory record.
func suffixIsZip64(t *testing.T, zip sizedReaderAt) bool {
	d := make([]byte, 1024)
	if _, err := zip.ReadAt(d, zip.Size()-int64(len(d))); err != nil {
		t.Fatalf("ReadAt: %v", err)
	}

	sigOff := findSignatureInBlock(d)
	if sigOff == -1 {
		t.Errorf("failed to find signature in block")
		return false
	}

	dirOff, err := findDirectory64End(zip, zip.Size()-int64(len(d))+int64(sigOff))
	if err != nil {
		t.Fatalf("findDirectory64End: %v", err)
	}
	if dirOff == -1 {
		// No zip64 locator record: plain 32-bit archive.
		return false
	}

	d = make([]byte, directory64EndLen)
	if _, err := zip.ReadAt(d, dirOff); err != nil {
		t.Fatalf("ReadAt(off=%d): %v", dirOff, err)
	}

	b := readBuf(d)
	if sig := b.uint32(); sig != directory64EndSignature {
		return false
	}

	// The record's own size field excludes the 12 bytes of signature + size.
	size := b.uint64()
	if size != directory64EndLen-12 {
		t.Errorf("expected length of %d, got %d", directory64EndLen-12, size)
	}
	return true
}

// Zip64 is required if the total size of the records is uint32max.
func TestZip64LargeDirectory(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("too slow on wasm")
	}
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	t.Parallel()
	// gen returns a func that writes a zip with a wantLen bytes
	// of central directory.
	gen := func(wantLen int64) func(*Writer) {
		return func(w *Writer) {
			w.testHookCloseSizeOffset = func(size, off uint64) {
				if size != uint64(wantLen) {
					t.Errorf("Close central directory size = %d; want %d", size, wantLen)
				}
			}

			// Each record uses a maximum-length name plus a comment sized so
			// the directory entries sum to exactly wantLen bytes.
			uint16string := strings.Repeat(".", uint16max)
			remain := wantLen
			for remain > 0 {
				commentLen := int(uint16max) - directoryHeaderLen - 1
				thisRecLen := directoryHeaderLen + int(uint16max) + commentLen
				if int64(thisRecLen) > remain {
					// Trim the final record's comment to hit wantLen exactly.
					remove := thisRecLen - int(remain)
					commentLen -= remove
					thisRecLen -= remove
				}
				remain -= int64(thisRecLen)
				f, err := w.CreateHeader(&FileHeader{
					Name:    uint16string,
					Comment: uint16string[:commentLen],
				})
				if err != nil {
					t.Fatalf("CreateHeader: %v", err)
				}
				f.(*fileWriter).crc32 = fakeHash32{}
			}
			if err := w.Close(); err != nil {
				t.Fatalf("Close: %v", err)
			}
		}
	}
	t.Run("uint32max-1_NoZip64", func(t *testing.T) {
		t.Parallel()
		if generatesZip64(t, gen(uint32max-1)) {
			t.Error("unexpected zip64")
		}
	})
	t.Run("uint32max_HasZip64", func(t *testing.T) {
		t.Parallel()
		if !generatesZip64(t, gen(uint32max)) {
			t.Error("expected zip64")
		}
	})
}

// testZip64 writes a single-file archive of size bytes of '.' plus "END\n",
// reads it back, verifies the trailing bytes and the 32/64-bit size fields,
// and returns the RLE-backed archive for further inspection.
func testZip64(t testing.TB, size int64) *rleBuffer {
	const chunkSize = 1024
	chunks := int(size / chunkSize)
	// write size bytes plus "END\n" to a zip file
	buf := new(rleBuffer)
	w := NewWriter(buf)
	f, err := w.CreateHeader(&FileHeader{
		Name:   "huge.txt",
		Method: Store,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Skip real CRC work on the writer; the reader side is stubbed below.
	f.(*fileWriter).crc32 = fakeHash32{}
	chunk := make([]byte, chunkSize)
	for i := range chunk {
		chunk[i] = '.'
	}
	for i := 0; i < chunks; i++ {
		_, err := f.Write(chunk)
		if err != nil {
			t.Fatal("write chunk:", err)
		}
	}
	if frag := int(size % chunkSize); frag > 0 {
		_, err := f.Write(chunk[:frag])
		if err != nil {
			t.Fatal("write chunk:", err)
		}
	}
	end := []byte("END\n")
	_, err = f.Write(end)
	if err != nil {
		t.Fatal("write end:", err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read back zip file and check that we get to the end of it
	r, err := NewReader(buf, buf.Size())
	if err != nil {
		t.Fatal("reader:", err)
	}
	f0 := r.File[0]
	rc, err := f0.Open()
	if err != nil {
		t.Fatal("opening:", err)
	}
	// Stub the reader's checksum so the fake zero CRC is accepted.
	rc.(*checksumReader).hash = fakeHash32{}
	for i := 0; i < chunks; i++ {
		_, err := io.ReadFull(rc, chunk)
		if err != nil {
			t.Fatal("read:", err)
		}
	}
	if frag := int(size % chunkSize); frag > 0 {
		_, err := io.ReadFull(rc, chunk[:frag])
		if err != nil {
			t.Fatal("read:", err)
		}
	}
	gotEnd, err := io.ReadAll(rc)
	if err != nil {
		t.Fatal("read end:", err)
	}
	if !bytes.Equal(gotEnd, end) {
		t.Errorf("End of zip64 archive %q, want %q", gotEnd, end)
	}
	err = rc.Close()
	if err != nil {
		t.Fatal("closing:", err)
	}
	if size+int64(len("END\n")) >= 1<<32-1 {
		// Sizes at or past 4GiB-1 must report the uint32max sentinel.
		if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
			t.Errorf("UncompressedSize %#x, want %#x", got, want)
		}
	}

	if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
		t.Errorf("UncompressedSize64 %#x, want %#x", got, want)
	}

	return buf
}

// Issue 9857
func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
	if !suffixIsZip64(t, buf) {
		t.Fatal("not a zip64")
	}
}

// testValidHeader writes a tiny archive with header h, reads it back, and
// checks that the name, method, and size survive unchanged.
func testValidHeader(h *FileHeader, t *testing.T) {
	var buf bytes.Buffer
	z := NewWriter(&buf)

	f, err := z.CreateHeader(h)
	if err != nil {
		t.Fatalf("error creating header: %v", err)
	}
	if _, err := f.Write([]byte("hi")); err != nil {
		t.Fatalf("error writing content: %v", err)
	}
	if err := z.Close(); err != nil {
		t.Fatalf("error closing zip writer: %v", err)
	}

	b := buf.Bytes()
	zf, err := NewReader(bytes.NewReader(b), int64(len(b)))
	if err != nil {
		t.Fatalf("got %v, expected nil", err)
	}
	zh := zf.File[0].FileHeader
	if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != uint64(len("hi")) {
		t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi"))
	}
}

// Issue 4302.
func TestHeaderInvalidTagAndSize(t *testing.T) {
	const timeFormat = "20060102T150405.000.txt"

	ts := time.Now()
	filename := ts.Format(timeFormat)

	h := FileHeader{
		Name:   filename,
		Method: Deflate,
		Extra:  []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing
	}
	h.SetModTime(ts)

	testValidHeader(&h, t)
}

// TestHeaderTooShort checks that a truncated Extra field is tolerated.
func TestHeaderTooShort(t *testing.T) {
	h := FileHeader{
		Name:   "foo.txt",
		Method: Deflate,
		Extra:  []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing
	}
	testValidHeader(&h, t)
}

// TestHeaderTooLongErr checks that over-long names and Extra fields are
// rejected with the specific sentinel errors.
func TestHeaderTooLongErr(t *testing.T) {
	var headerTests = []struct {
		name    string
		extra   []byte
		wanterr error
	}{
		{
			name:    strings.Repeat("x", 1<<16),
			extra:   []byte{},
			wanterr: errLongName,
		},
		{
			name:    "long_extra",
			extra:   bytes.Repeat([]byte{0xff}, 1<<16),
			wanterr: errLongExtra,
		},
	}

	// write a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)

	for _, test := range headerTests {
		h := &FileHeader{
			Name:  test.name,
			Extra: test.extra,
		}
		_, err := w.CreateHeader(h)
		if err != test.wanterr {
			t.Errorf("error=%v, want %v", err, test.wanterr)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}
}

// TestHeaderIgnoredSize checks that a zip64 extra block with a bogus
// declared size does not corrupt header parsing.
func TestHeaderIgnoredSize(t *testing.T) {
	h := FileHeader{
		Name:   "foo.txt",
		Method: Deflate,
		Extra:  []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
	}
	testValidHeader(&h, t)
}

// Issue 4393. It is valid to have an extra data header
// which contains no body.
func TestZeroLengthHeader(t *testing.T) {
	h := FileHeader{
		Name:   "extadata.txt",
		Method: Deflate,
		Extra: []byte{
			85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5
			85, 120, 0, 0, // tag 30805 size 0
		},
	}
	testValidHeader(&h, t)
}

// Just benchmarking how fast the Zip64 test above is. Not related to
// our zip performance, since the test above disabled CRC32 and flate.
func BenchmarkZip64Test(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testZip64(b, 1<<26)
	}
}

// BenchmarkZip64TestSizes runs the same round trip at several sizes, in
// parallel, to show how the cost scales.
func BenchmarkZip64TestSizes(b *testing.B) {
	for _, size := range []int64{1 << 12, 1 << 20, 1 << 26} {
		b.Run(fmt.Sprint(size), func(b *testing.B) {
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					testZip64(b, size)
				}
			})
		})
	}
}

// TestSuffixSaver exhaustively checks suffixSaver reads at every offset and
// length against the known 15-byte stream "abcdefghijklmno" with keep=10.
func TestSuffixSaver(t *testing.T) {
	const keep = 10
	ss := &suffixSaver{keep: keep}
	ss.Write([]byte("abc"))
	if got := string(ss.Suffix()); got != "abc" {
		t.Errorf("got = %q; want abc", got)
	}
	ss.Write([]byte("defghijklmno"))
	if got := string(ss.Suffix()); got != "fghijklmno" {
		t.Errorf("got = %q; want fghijklmno", got)
	}
	if got, want := ss.Size(), int64(len("abc")+len("defghijklmno")); got != want {
		t.Errorf("Size = %d; want %d", got, want)
	}
	buf := make([]byte, ss.Size())
	for off := int64(0); off < ss.Size(); off++ {
		for size := 1; size <= int(ss.Size()-off); size++ {
			readBuf := buf[:size]
			n, err := ss.ReadAt(readBuf, off)
			if off < ss.Size()-keep {
				// Offsets older than the kept window must be refused.
				if err != errDiscardedBytes {
					t.Errorf("off %d, size %d = %v, %v (%q); want errDiscardedBytes", off, size, n, err, readBuf[:n])
				}
				continue
			}
			want := "abcdefghijklmno"[off : off+int64(size)]
			got := string(readBuf[:n])
			if err != nil || got != want {
				t.Errorf("off %d, size %d = %v, %v (%q); want %q", off, size, n, err, got, want)
			}
		}
	}
}

// zeros is an io.Reader that yields an endless stream of zero bytes.
type zeros struct{}

func (zeros) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}