github.com/rafaeltorres324/go/src@v0.0.0-20210519164414-9fdf653a9838/archive/zip/reader.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package zip
     6  
     7  import (
     8  	"bufio"
     9  	"encoding/binary"
    10  	"errors"
    11  	"hash"
    12  	"hash/crc32"
    13  	"io"
    14  	"io/fs"
    15  	"os"
    16  	"path"
    17  	"sort"
    18  	"strings"
    19  	"sync"
    20  	"time"
    21  )
    22  
    23  var (
    24  	ErrFormat    = errors.New("zip: not a valid zip file")
    25  	ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
    26  	ErrChecksum  = errors.New("zip: checksum error")
    27  )
    28  
    29  // A Reader serves content from a ZIP archive.
    30  type Reader struct {
    31  	r             io.ReaderAt
    32  	File          []*File
    33  	Comment       string
    34  	decompressors map[uint16]Decompressor
    35  
    36  	// fileList is a list of files sorted by ename,
    37  	// for use by the Open method.
    38  	fileListOnce sync.Once
    39  	fileList     []fileListEntry
    40  }
    41  
    42  // A ReadCloser is a Reader that must be closed when no longer needed.
    43  type ReadCloser struct {
    44  	f *os.File
    45  	Reader
    46  }
    47  
    48  // A File is a single file in a ZIP archive.
    49  // The file information is in the embedded FileHeader.
    50  // The file content can be accessed by calling Open.
    51  type File struct {
    52  	FileHeader
    53  	zip          *Reader
    54  	zipr         io.ReaderAt
    55  	zipsize      int64
    56  	headerOffset int64
    57  }
    58  
    59  func (f *File) hasDataDescriptor() bool {
    60  	return f.Flags&0x8 != 0
    61  }
    62  
    63  // OpenReader will open the Zip file specified by name and return a ReadCloser.
    64  func OpenReader(name string) (*ReadCloser, error) {
    65  	f, err := os.Open(name)
    66  	if err != nil {
    67  		return nil, err
    68  	}
    69  	fi, err := f.Stat()
    70  	if err != nil {
    71  		f.Close()
    72  		return nil, err
    73  	}
    74  	r := new(ReadCloser)
    75  	if err := r.init(f, fi.Size()); err != nil {
    76  		f.Close()
    77  		return nil, err
    78  	}
    79  	r.f = f
    80  	return r, nil
    81  }
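
// Illustrative sketch: typical use of OpenReader from a separate package
// importing "archive/zip", "fmt", "io", and "log". The archive name
// "example.zip" is hypothetical. Each entry is drained to EOF so the CRC32
// check performed by the returned reader actually runs.
func listArchive() {
	rc, err := zip.OpenReader("example.zip") // hypothetical file name
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	for _, f := range rc.File {
		r, err := f.Open()
		if err != nil {
			log.Fatal(err)
		}
		n, err := io.Copy(io.Discard, r) // drain to EOF to verify the checksum
		r.Close()
		if err != nil {
			log.Fatal(err) // may be ErrChecksum, ErrAlgorithm, etc.
		}
		fmt.Printf("%s: %d bytes\n", f.Name, n)
	}
}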
    82  
    83  // NewReader returns a new Reader reading from r, which is assumed to
    84  // have the given size in bytes.
    85  func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
    86  	if size < 0 {
    87  		return nil, errors.New("zip: size cannot be negative")
    88  	}
    89  	zr := new(Reader)
    90  	if err := zr.init(r, size); err != nil {
    91  		return nil, err
    92  	}
    93  	return zr, nil
    94  }
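
// Illustrative sketch: NewReader needs only an io.ReaderAt and the total
// size, so an archive already held in memory can be read without touching
// the filesystem. Assumes a caller importing "archive/zip" and "bytes";
// data is a hypothetical []byte holding a complete zip archive.
func openInMemory(data []byte) (*zip.Reader, error) {
	return zip.NewReader(bytes.NewReader(data), int64(len(data)))
}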
    95  
    96  func (z *Reader) init(r io.ReaderAt, size int64) error {
    97  	end, err := readDirectoryEnd(r, size)
    98  	if err != nil {
    99  		return err
   100  	}
   101  	z.r = r
   102  	z.File = make([]*File, 0, end.directoryRecords)
   103  	z.Comment = end.comment
   104  	rs := io.NewSectionReader(r, 0, size)
   105  	if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil {
   106  		return err
   107  	}
   108  	buf := bufio.NewReader(rs)
   109  
   110  	// The count of files inside a zip is truncated to fit in a uint16.
   111  	// Gloss over this by reading headers until we encounter
   112  	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
   113  	// the file count modulo 65536 is incorrect.
   114  	for {
   115  		f := &File{zip: z, zipr: r, zipsize: size}
   116  		err = readDirectoryHeader(f, buf)
   117  		if err == ErrFormat || err == io.ErrUnexpectedEOF {
   118  			break
   119  		}
   120  		if err != nil {
   121  			return err
   122  		}
   123  		z.File = append(z.File, f)
   124  	}
   125  	if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
   126  		// Return the readDirectoryHeader error if we read
   127  		// the wrong number of directory entries.
   128  		return err
   129  	}
   130  	return nil
   131  }
   132  
   133  // RegisterDecompressor registers or overrides a custom decompressor for a
   134  // specific method ID. If a decompressor for a given method is not found,
   135  // Reader will default to looking up the decompressor at the package level.
   136  func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
   137  	if z.decompressors == nil {
   138  		z.decompressors = make(map[uint16]Decompressor)
   139  	}
   140  	z.decompressors[method] = dcomp
   141  }
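
// Illustrative sketch: registering a decompressor for a method this package
// does not handle natively. Method 12 is bzip2 in the ZIP specification;
// assumes a caller importing "archive/zip", "compress/bzip2", and "io".
// The registration applies only to this Reader; the package-level
// zip.RegisterDecompressor would apply to all readers.
func enableBzip2(zr *zip.Reader) {
	zr.RegisterDecompressor(12, func(r io.Reader) io.ReadCloser {
		return io.NopCloser(bzip2.NewReader(r))
	})
}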
   142  
   143  func (z *Reader) decompressor(method uint16) Decompressor {
   144  	dcomp := z.decompressors[method]
   145  	if dcomp == nil {
   146  		dcomp = decompressor(method)
   147  	}
   148  	return dcomp
   149  }
   150  
   151  // Close closes the Zip file, rendering it unusable for I/O.
   152  func (rc *ReadCloser) Close() error {
   153  	return rc.f.Close()
   154  }
   155  
   156  // DataOffset returns the offset of the file's possibly-compressed
   157  // data, relative to the beginning of the zip file.
   158  //
   159  // Most callers should instead use Open, which transparently
   160  // decompresses data and verifies checksums.
   161  func (f *File) DataOffset() (offset int64, err error) {
   162  	bodyOffset, err := f.findBodyOffset()
   163  	if err != nil {
   164  		return
   165  	}
   166  	return f.headerOffset + bodyOffset, nil
   167  }
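
// Illustrative sketch: DataOffset can be combined with the io.ReaderAt the
// Reader was built from to copy an entry's raw (still compressed) bytes
// without decoding them. The helper below is hypothetical; assumes a caller
// importing "archive/zip" and "io".
func rawEntryReader(f *zip.File, src io.ReaderAt) (io.Reader, error) {
	off, err := f.DataOffset()
	if err != nil {
		return nil, err
	}
	// CompressedSize64 is the stored length of the raw entry body.
	return io.NewSectionReader(src, off, int64(f.CompressedSize64)), nil
}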
   168  
   169  // Open returns a ReadCloser that provides access to the File's contents.
   170  // Multiple files may be read concurrently.
   171  func (f *File) Open() (io.ReadCloser, error) {
   172  	bodyOffset, err := f.findBodyOffset()
   173  	if err != nil {
   174  		return nil, err
   175  	}
   176  	size := int64(f.CompressedSize64)
   177  	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
   178  	dcomp := f.zip.decompressor(f.Method)
   179  	if dcomp == nil {
   180  		return nil, ErrAlgorithm
   181  	}
   182  	var rc io.ReadCloser = dcomp(r)
   183  	var desr io.Reader
   184  	if f.hasDataDescriptor() {
   185  		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
   186  	}
   187  	rc = &checksumReader{
   188  		rc:   rc,
   189  		hash: crc32.NewIEEE(),
   190  		f:    f,
   191  		desr: desr,
   192  	}
   193  	return rc, nil
   194  }
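
// Illustrative sketch: each call to Open returns an independent reader over
// its own section of the archive, so entries may be read concurrently, as
// the doc comment above notes. Hypothetical helper; assumes a caller
// importing "archive/zip", "io", and "sync".
func drainConcurrently(zr *zip.Reader) error {
	var wg sync.WaitGroup
	errs := make(chan error, len(zr.File))
	for _, f := range zr.File {
		wg.Add(1)
		go func(f *zip.File) {
			defer wg.Done()
			r, err := f.Open()
			if err != nil {
				errs <- err
				return
			}
			defer r.Close()
			if _, err := io.Copy(io.Discard, r); err != nil {
				errs <- err
			}
		}(f)
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		return err // report the first failure, if any
	}
	return nil
}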
   195  
   196  type checksumReader struct {
   197  	rc    io.ReadCloser
   198  	hash  hash.Hash32
   199  	nread uint64 // number of bytes read so far
   200  	f     *File
   201  	desr  io.Reader // if non-nil, where to read the data descriptor
   202  	err   error     // sticky error
   203  }
   204  
   205  func (r *checksumReader) Stat() (fs.FileInfo, error) {
   206  	return headerFileInfo{&r.f.FileHeader}, nil
   207  }
   208  
   209  func (r *checksumReader) Read(b []byte) (n int, err error) {
   210  	if r.err != nil {
   211  		return 0, r.err
   212  	}
   213  	n, err = r.rc.Read(b)
   214  	r.hash.Write(b[:n])
   215  	r.nread += uint64(n)
   216  	if err == nil {
   217  		return
   218  	}
   219  	if err == io.EOF {
   220  		if r.nread != r.f.UncompressedSize64 {
   221  			return 0, io.ErrUnexpectedEOF
   222  		}
   223  		if r.desr != nil {
   224  			if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
   225  				if err1 == io.EOF {
   226  					err = io.ErrUnexpectedEOF
   227  				} else {
   228  					err = err1
   229  				}
   230  			} else if r.hash.Sum32() != r.f.CRC32 {
   231  				err = ErrChecksum
   232  			}
   233  		} else {
   234  			// If there's not a data descriptor, we still compare
   235  			// the CRC32 of what we've read against the file header
   236  			// or TOC's CRC32, if it seems like it was set.
   237  			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
   238  				err = ErrChecksum
   239  			}
   240  		}
   241  	}
   242  	r.err = err
   243  	return
   244  }
   245  
   246  func (r *checksumReader) Close() error { return r.rc.Close() }
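
// Illustrative sketch: the CRC32 comparison in Read above only happens once
// the wrapped reader reaches io.EOF, so a caller that stops reading early
// never sees ErrChecksum. Hypothetical helper; assumes a caller importing
// "archive/zip", "errors", "fmt", and "io".
func verifyEntry(f *zip.File) error {
	r, err := f.Open()
	if err != nil {
		return err
	}
	defer r.Close()
	// Drain the entry; the checksum error, if any, surfaces at EOF.
	if _, err := io.Copy(io.Discard, r); err != nil {
		if errors.Is(err, zip.ErrChecksum) {
			return fmt.Errorf("%s: corrupt entry: %w", f.Name, err)
		}
		return err
	}
	return nil
}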
   247  
   248  // findBodyOffset does the minimum work to verify the file has a header
   249  // and returns the file body offset.
   250  func (f *File) findBodyOffset() (int64, error) {
   251  	var buf [fileHeaderLen]byte
   252  	if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
   253  		return 0, err
   254  	}
   255  	b := readBuf(buf[:])
   256  	if sig := b.uint32(); sig != fileHeaderSignature {
   257  		return 0, ErrFormat
   258  	}
   259  	b = b[22:] // skip over most of the header
   260  	filenameLen := int(b.uint16())
   261  	extraLen := int(b.uint16())
   262  	return int64(fileHeaderLen + filenameLen + extraLen), nil
   263  }
   264  
   265  // readDirectoryHeader attempts to read a directory header from r.
   266  // It returns io.ErrUnexpectedEOF if it cannot read a complete header,
   267  // and ErrFormat if it doesn't find a valid header signature.
   268  func readDirectoryHeader(f *File, r io.Reader) error {
   269  	var buf [directoryHeaderLen]byte
   270  	if _, err := io.ReadFull(r, buf[:]); err != nil {
   271  		return err
   272  	}
   273  	b := readBuf(buf[:])
   274  	if sig := b.uint32(); sig != directoryHeaderSignature {
   275  		return ErrFormat
   276  	}
   277  	f.CreatorVersion = b.uint16()
   278  	f.ReaderVersion = b.uint16()
   279  	f.Flags = b.uint16()
   280  	f.Method = b.uint16()
   281  	f.ModifiedTime = b.uint16()
   282  	f.ModifiedDate = b.uint16()
   283  	f.CRC32 = b.uint32()
   284  	f.CompressedSize = b.uint32()
   285  	f.UncompressedSize = b.uint32()
   286  	f.CompressedSize64 = uint64(f.CompressedSize)
   287  	f.UncompressedSize64 = uint64(f.UncompressedSize)
   288  	filenameLen := int(b.uint16())
   289  	extraLen := int(b.uint16())
   290  	commentLen := int(b.uint16())
    291  	b = b[4:] // skip start disk number and internal attributes (2x uint16)
   292  	f.ExternalAttrs = b.uint32()
   293  	f.headerOffset = int64(b.uint32())
   294  	d := make([]byte, filenameLen+extraLen+commentLen)
   295  	if _, err := io.ReadFull(r, d); err != nil {
   296  		return err
   297  	}
   298  	f.Name = string(d[:filenameLen])
   299  	f.Extra = d[filenameLen : filenameLen+extraLen]
   300  	f.Comment = string(d[filenameLen+extraLen:])
   301  
   302  	// Determine the character encoding.
   303  	utf8Valid1, utf8Require1 := detectUTF8(f.Name)
   304  	utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
   305  	switch {
   306  	case !utf8Valid1 || !utf8Valid2:
   307  		// Name and Comment definitely not UTF-8.
   308  		f.NonUTF8 = true
   309  	case !utf8Require1 && !utf8Require2:
   310  		// Name and Comment use only single-byte runes that overlap with UTF-8.
   311  		f.NonUTF8 = false
   312  	default:
   313  		// Might be UTF-8, might be some other encoding; preserve existing flag.
   314  		// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
   315  		// Since it is impossible to always distinguish valid UTF-8 from some
   316  		// other encoding (e.g., GBK or Shift-JIS), we trust the flag.
   317  		f.NonUTF8 = f.Flags&0x800 == 0
   318  	}
   319  
   320  	needUSize := f.UncompressedSize == ^uint32(0)
   321  	needCSize := f.CompressedSize == ^uint32(0)
   322  	needHeaderOffset := f.headerOffset == int64(^uint32(0))
   323  
   324  	// Best effort to find what we need.
   325  	// Other zip authors might not even follow the basic format,
   326  	// and we'll just ignore the Extra content in that case.
   327  	var modified time.Time
   328  parseExtras:
   329  	for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
   330  		fieldTag := extra.uint16()
   331  		fieldSize := int(extra.uint16())
   332  		if len(extra) < fieldSize {
   333  			break
   334  		}
   335  		fieldBuf := extra.sub(fieldSize)
   336  
   337  		switch fieldTag {
   338  		case zip64ExtraID:
   339  			// update directory values from the zip64 extra block.
   340  			// They should only be consulted if the sizes read earlier
   341  			// are maxed out.
   342  			// See golang.org/issue/13367.
   343  			if needUSize {
   344  				needUSize = false
   345  				if len(fieldBuf) < 8 {
   346  					return ErrFormat
   347  				}
   348  				f.UncompressedSize64 = fieldBuf.uint64()
   349  			}
   350  			if needCSize {
   351  				needCSize = false
   352  				if len(fieldBuf) < 8 {
   353  					return ErrFormat
   354  				}
   355  				f.CompressedSize64 = fieldBuf.uint64()
   356  			}
   357  			if needHeaderOffset {
   358  				needHeaderOffset = false
   359  				if len(fieldBuf) < 8 {
   360  					return ErrFormat
   361  				}
   362  				f.headerOffset = int64(fieldBuf.uint64())
   363  			}
   364  		case ntfsExtraID:
   365  			if len(fieldBuf) < 4 {
   366  				continue parseExtras
   367  			}
   368  			fieldBuf.uint32()        // reserved (ignored)
   369  			for len(fieldBuf) >= 4 { // need at least tag and size
   370  				attrTag := fieldBuf.uint16()
   371  				attrSize := int(fieldBuf.uint16())
   372  				if len(fieldBuf) < attrSize {
   373  					continue parseExtras
   374  				}
   375  				attrBuf := fieldBuf.sub(attrSize)
   376  				if attrTag != 1 || attrSize != 24 {
   377  					continue // Ignore irrelevant attributes
   378  				}
   379  
   380  				const ticksPerSecond = 1e7    // Windows timestamp resolution
   381  				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
   382  				secs := int64(ts / ticksPerSecond)
   383  				nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
   384  				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
   385  				modified = time.Unix(epoch.Unix()+secs, nsecs)
   386  			}
   387  		case unixExtraID, infoZipUnixExtraID:
   388  			if len(fieldBuf) < 8 {
   389  				continue parseExtras
   390  			}
   391  			fieldBuf.uint32()              // AcTime (ignored)
   392  			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
   393  			modified = time.Unix(ts, 0)
   394  		case extTimeExtraID:
   395  			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
   396  				continue parseExtras
   397  			}
   398  			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
   399  			modified = time.Unix(ts, 0)
   400  		}
   401  	}
   402  
   403  	msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
   404  	f.Modified = msdosModified
   405  	if !modified.IsZero() {
   406  		f.Modified = modified.UTC()
   407  
   408  		// If legacy MS-DOS timestamps are set, we can use the delta between
   409  		// the legacy and extended versions to estimate timezone offset.
   410  		//
   411  		// A non-UTC timezone is always used (even if offset is zero).
   412  		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
   413  		// determining whether extended timestamps are present.
   414  		// This is necessary for users that need to do additional time
   415  		// calculations when dealing with legacy ZIP formats.
   416  		if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
   417  			f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
   418  		}
   419  	}
   420  
   421  	// Assume that uncompressed size 2³²-1 could plausibly happen in
   422  	// an old zip32 file that was sharding inputs into the largest chunks
   423  	// possible (or is just malicious; search the web for 42.zip).
   424  	// If needUSize is true still, it means we didn't see a zip64 extension.
   425  	// As long as the compressed size is not also 2³²-1 (implausible)
   426  	// and the header is not also 2³²-1 (equally implausible),
   427  	// accept the uncompressed size 2³²-1 as valid.
   428  	// If nothing else, this keeps archive/zip working with 42.zip.
   429  	_ = needUSize
   430  
   431  	if needCSize || needHeaderOffset {
   432  		return ErrFormat
   433  	}
   434  
   435  	return nil
   436  }
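
// Illustrative sketch: the Extra field walked above is a sequence of
// little-endian (tag, size, data) records, so callers can scan
// FileHeader.Extra the same way. Tag 0x0001 is the zip64 extended
// information field. Hypothetical helper; assumes a caller importing
// "archive/zip" and "encoding/binary".
func hasZip64Extra(fh *zip.FileHeader) bool {
	b := fh.Extra
	for len(b) >= 4 { // need at least tag and size
		tag := binary.LittleEndian.Uint16(b[:2])
		size := int(binary.LittleEndian.Uint16(b[2:4]))
		b = b[4:]
		if size > len(b) {
			return false // malformed extra data; give up
		}
		if tag == 0x0001 {
			return true
		}
		b = b[size:]
	}
	return false
}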
   437  
   438  func readDataDescriptor(r io.Reader, f *File) error {
   439  	var buf [dataDescriptorLen]byte
   440  
   441  	// The spec says: "Although not originally assigned a
   442  	// signature, the value 0x08074b50 has commonly been adopted
   443  	// as a signature value for the data descriptor record.
   444  	// Implementers should be aware that ZIP files may be
   445  	// encountered with or without this signature marking data
   446  	// descriptors and should account for either case when reading
   447  	// ZIP files to ensure compatibility."
   448  	//
   449  	// dataDescriptorLen includes the size of the signature but
   450  	// first read just those 4 bytes to see if it exists.
   451  	if _, err := io.ReadFull(r, buf[:4]); err != nil {
   452  		return err
   453  	}
   454  	off := 0
   455  	maybeSig := readBuf(buf[:4])
   456  	if maybeSig.uint32() != dataDescriptorSignature {
   457  		// No data descriptor signature. Keep these four
   458  		// bytes.
   459  		off += 4
   460  	}
   461  	if _, err := io.ReadFull(r, buf[off:12]); err != nil {
   462  		return err
   463  	}
   464  	b := readBuf(buf[:12])
   465  	if b.uint32() != f.CRC32 {
   466  		return ErrChecksum
   467  	}
   468  
   469  	// The two sizes that follow here can be either 32 bits or 64 bits
   470  	// but the spec is not very clear on this and different
    471  	// interpretations have been made, causing incompatibilities. We
   472  	// already have the sizes from the central directory so we can
   473  	// just ignore these.
   474  
   475  	return nil
   476  }
   477  
   478  func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
   479  	// look for directoryEndSignature in the last 1k, then in the last 65k
   480  	var buf []byte
   481  	var directoryEndOffset int64
   482  	for i, bLen := range []int64{1024, 65 * 1024} {
   483  		if bLen > size {
   484  			bLen = size
   485  		}
   486  		buf = make([]byte, int(bLen))
   487  		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
   488  			return nil, err
   489  		}
   490  		if p := findSignatureInBlock(buf); p >= 0 {
   491  			buf = buf[p:]
   492  			directoryEndOffset = size - bLen + int64(p)
   493  			break
   494  		}
   495  		if i == 1 || bLen == size {
   496  			return nil, ErrFormat
   497  		}
   498  	}
   499  
   500  	// read header into struct
   501  	b := readBuf(buf[4:]) // skip signature
   502  	d := &directoryEnd{
   503  		diskNbr:            uint32(b.uint16()),
   504  		dirDiskNbr:         uint32(b.uint16()),
   505  		dirRecordsThisDisk: uint64(b.uint16()),
   506  		directoryRecords:   uint64(b.uint16()),
   507  		directorySize:      uint64(b.uint32()),
   508  		directoryOffset:    uint64(b.uint32()),
   509  		commentLen:         b.uint16(),
   510  	}
   511  	l := int(d.commentLen)
   512  	if l > len(b) {
   513  		return nil, errors.New("zip: invalid comment length")
   514  	}
   515  	d.comment = string(b[:l])
   516  
   517  	// These values mean that the file can be a zip64 file
   518  	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
   519  		p, err := findDirectory64End(r, directoryEndOffset)
   520  		if err == nil && p >= 0 {
   521  			err = readDirectory64End(r, p, d)
   522  		}
   523  		if err != nil {
   524  			return nil, err
   525  		}
   526  	}
   527  	// Make sure directoryOffset points to somewhere in our file.
   528  	if o := int64(d.directoryOffset); o < 0 || o >= size {
   529  		return nil, ErrFormat
   530  	}
   531  	return d, nil
   532  }
   533  
   534  // findDirectory64End tries to read the zip64 locator just before the
   535  // directory end and returns the offset of the zip64 directory end if
   536  // found.
   537  func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
   538  	locOffset := directoryEndOffset - directory64LocLen
   539  	if locOffset < 0 {
   540  		return -1, nil // no need to look for a header outside the file
   541  	}
   542  	buf := make([]byte, directory64LocLen)
   543  	if _, err := r.ReadAt(buf, locOffset); err != nil {
   544  		return -1, err
   545  	}
   546  	b := readBuf(buf)
   547  	if sig := b.uint32(); sig != directory64LocSignature {
   548  		return -1, nil
   549  	}
   550  	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
   551  		return -1, nil // the file is not a valid zip64-file
   552  	}
   553  	p := b.uint64()      // relative offset of the zip64 end of central directory record
   554  	if b.uint32() != 1 { // total number of disks
   555  		return -1, nil // the file is not a valid zip64-file
   556  	}
   557  	return int64(p), nil
   558  }
   559  
   560  // readDirectory64End reads the zip64 directory end and updates the
   561  // directory end with the zip64 directory end values.
   562  func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
   563  	buf := make([]byte, directory64EndLen)
   564  	if _, err := r.ReadAt(buf, offset); err != nil {
   565  		return err
   566  	}
   567  
   568  	b := readBuf(buf)
   569  	if sig := b.uint32(); sig != directory64EndSignature {
   570  		return ErrFormat
   571  	}
   572  
   573  	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
   574  	d.diskNbr = b.uint32()            // number of this disk
   575  	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
   576  	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
   577  	d.directoryRecords = b.uint64()   // total number of entries in the central directory
   578  	d.directorySize = b.uint64()      // size of the central directory
   579  	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number
   580  
   581  	return nil
   582  }
   583  
   584  func findSignatureInBlock(b []byte) int {
   585  	for i := len(b) - directoryEndLen; i >= 0; i-- {
   586  		// defined from directoryEndSignature in struct.go
   587  		if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
   588  			// n is length of comment
   589  			n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
   590  			if n+directoryEndLen+i <= len(b) {
   591  				return i
   592  			}
   593  		}
   594  	}
   595  	return -1
   596  }
   597  
   598  type readBuf []byte
   599  
   600  func (b *readBuf) uint8() uint8 {
   601  	v := (*b)[0]
   602  	*b = (*b)[1:]
   603  	return v
   604  }
   605  
   606  func (b *readBuf) uint16() uint16 {
   607  	v := binary.LittleEndian.Uint16(*b)
   608  	*b = (*b)[2:]
   609  	return v
   610  }
   611  
   612  func (b *readBuf) uint32() uint32 {
   613  	v := binary.LittleEndian.Uint32(*b)
   614  	*b = (*b)[4:]
   615  	return v
   616  }
   617  
   618  func (b *readBuf) uint64() uint64 {
   619  	v := binary.LittleEndian.Uint64(*b)
   620  	*b = (*b)[8:]
   621  	return v
   622  }
   623  
   624  func (b *readBuf) sub(n int) readBuf {
   625  	b2 := (*b)[:n]
   626  	*b = (*b)[n:]
   627  	return b2
   628  }
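
// Illustrative sketch: readBuf is a small consuming cursor; each method
// decodes one little-endian value and advances the slice. A hypothetical
// package-internal use, guarded against short input:
func parseTagAndSize(p []byte) (tag, size uint16, ok bool) {
	if len(p) < 4 {
		return 0, 0, false
	}
	b := readBuf(p)
	tag = b.uint16()  // consumes bytes 0-1
	size = b.uint16() // consumes bytes 2-3
	return tag, size, true
}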
   629  
   630  // A fileListEntry is a File and its ename.
   631  // If file == nil, the fileListEntry describes a directory, without metadata.
   632  type fileListEntry struct {
   633  	name string
   634  	file *File // nil for directories
   635  }
   636  
   637  type fileInfoDirEntry interface {
   638  	fs.FileInfo
   639  	fs.DirEntry
   640  }
   641  
   642  func (e *fileListEntry) stat() fileInfoDirEntry {
   643  	if e.file != nil {
   644  		return headerFileInfo{&e.file.FileHeader}
   645  	}
   646  	return e
   647  }
   648  
   649  // Only used for directories.
   650  func (f *fileListEntry) Name() string       { _, elem, _ := split(f.name); return elem }
   651  func (f *fileListEntry) Size() int64        { return 0 }
   652  func (f *fileListEntry) ModTime() time.Time { return time.Time{} }
   653  func (f *fileListEntry) Mode() fs.FileMode  { return fs.ModeDir | 0555 }
   654  func (f *fileListEntry) Type() fs.FileMode  { return fs.ModeDir }
   655  func (f *fileListEntry) IsDir() bool        { return true }
   656  func (f *fileListEntry) Sys() interface{}   { return nil }
   657  
   658  func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
   659  
   660  // toValidName coerces name to be a valid name for fs.FS.Open.
   661  func toValidName(name string) string {
   662  	name = strings.ReplaceAll(name, `\`, `/`)
   663  	p := path.Clean(name)
   664  	if strings.HasPrefix(p, "/") {
   665  		p = p[len("/"):]
   666  	}
    667  	for strings.HasPrefix(p, "../") {
   668  		p = p[len("../"):]
   669  	}
   670  	return p
   671  }
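
// Illustrative sketch of what toValidName produces, e.g. `a\b.txt` becomes
// "a/b.txt", "/etc/passwd" becomes "etc/passwd", and "../../x" becomes "x".
// Hypothetical package-internal helper:
func mappedNames(names []string) map[string]string {
	m := make(map[string]string, len(names))
	for _, n := range names {
		m[n] = toValidName(n)
	}
	return m
}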
   672  
   673  func (r *Reader) initFileList() {
   674  	r.fileListOnce.Do(func() {
   675  		dirs := make(map[string]bool)
   676  		for _, file := range r.File {
   677  			name := toValidName(file.Name)
   678  			for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
   679  				dirs[dir] = true
   680  			}
   681  			r.fileList = append(r.fileList, fileListEntry{name, file})
   682  		}
   683  		for dir := range dirs {
   684  			r.fileList = append(r.fileList, fileListEntry{dir + "/", nil})
   685  		}
   686  
   687  		sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
   688  	})
   689  }
   690  
   691  func fileEntryLess(x, y string) bool {
   692  	xdir, xelem, _ := split(x)
   693  	ydir, yelem, _ := split(y)
   694  	return xdir < ydir || xdir == ydir && xelem < yelem
   695  }
   696  
   697  // Open opens the named file in the ZIP archive,
   698  // using the semantics of fs.FS.Open:
   699  // paths are always slash separated, with no
   700  // leading / or ../ elements.
   701  func (r *Reader) Open(name string) (fs.File, error) {
   702  	r.initFileList()
   703  
   704  	e := r.openLookup(name)
   705  	if e == nil || !fs.ValidPath(name) {
   706  		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
   707  	}
   708  	if e.file == nil || strings.HasSuffix(e.file.Name, "/") {
   709  		return &openDir{e, r.openReadDir(name), 0}, nil
   710  	}
   711  	rc, err := e.file.Open()
   712  	if err != nil {
   713  		return nil, err
   714  	}
   715  	return rc.(fs.File), nil
   716  }
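
// Illustrative sketch: because Reader implements fs.FS through this Open
// method, an archive can be handed directly to the io/fs helpers.
// Hypothetical helper; assumes a caller importing "archive/zip", "fmt",
// and "io/fs".
func walkArchive(zr *zip.Reader) error {
	return fs.WalkDir(zr, ".", func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(p, d.IsDir())
		return nil
	})
}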
   717  
   718  func split(name string) (dir, elem string, isDir bool) {
   719  	if name[len(name)-1] == '/' {
   720  		isDir = true
   721  		name = name[:len(name)-1]
   722  	}
   723  	i := len(name) - 1
   724  	for i >= 0 && name[i] != '/' {
   725  		i--
   726  	}
   727  	if i < 0 {
   728  		return ".", name, isDir
   729  	}
   730  	return name[:i], name[i+1:], isDir
   731  }
   732  
   733  var dotFile = &fileListEntry{name: "./"}
   734  
   735  func (r *Reader) openLookup(name string) *fileListEntry {
   736  	if name == "." {
   737  		return dotFile
   738  	}
   739  
   740  	dir, elem, _ := split(name)
   741  	files := r.fileList
   742  	i := sort.Search(len(files), func(i int) bool {
   743  		idir, ielem, _ := split(files[i].name)
   744  		return idir > dir || idir == dir && ielem >= elem
   745  	})
   746  	if i < len(files) {
   747  		fname := files[i].name
   748  		if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
   749  			return &files[i]
   750  		}
   751  	}
   752  	return nil
   753  }
   754  
   755  func (r *Reader) openReadDir(dir string) []fileListEntry {
   756  	files := r.fileList
   757  	i := sort.Search(len(files), func(i int) bool {
   758  		idir, _, _ := split(files[i].name)
   759  		return idir >= dir
   760  	})
   761  	j := sort.Search(len(files), func(j int) bool {
   762  		jdir, _, _ := split(files[j].name)
   763  		return jdir > dir
   764  	})
   765  	return files[i:j]
   766  }
   767  
   768  type openDir struct {
   769  	e      *fileListEntry
   770  	files  []fileListEntry
   771  	offset int
   772  }
   773  
   774  func (d *openDir) Close() error               { return nil }
   775  func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat(), nil }
   776  
   777  func (d *openDir) Read([]byte) (int, error) {
   778  	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
   779  }
   780  
   781  func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
   782  	n := len(d.files) - d.offset
   783  	if count > 0 && n > count {
   784  		n = count
   785  	}
   786  	if n == 0 {
   787  		if count <= 0 {
   788  			return nil, nil
   789  		}
   790  		return nil, io.EOF
   791  	}
   792  	list := make([]fs.DirEntry, n)
   793  	for i := range list {
   794  		list[i] = d.files[d.offset+i].stat()
   795  	}
   796  	d.offset += n
   797  	return list, nil
   798  }