github.com/corona10/go@v0.0.0-20180224231303-7a218942be57/src/archive/tar/reader.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
	"bytes"
	"io"
	"io/ioutil"
	"strconv"
	"strings"
	"time"
)

// Reader provides sequential access to the contents of a tar archive.
// Reader.Next advances to the next file in the archive (including the first),
// and then Reader can be treated as an io.Reader to access the file's data.
type Reader struct {
	r    io.Reader
	pad  int64      // Amount of padding (ignored) after current file entry
	curr fileReader // Reader for current file entry
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Reader to
	// ensure that this error is sticky.
	err error
}

type fileReader interface {
	io.Reader
	fileState

	WriteTo(io.Writer) (int64, error)
}

// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
	return &Reader{r: r, curr: &regFileReader{r, 0}}
}

// Next advances to the next entry in the tar archive.
// The Header.Size determines how many bytes can be read for the next file.
// Any remaining data in the current file is automatically discarded.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
	if tr.err != nil {
		return nil, tr.err
	}
	hdr, err := tr.next()
	tr.err = err
	return hdr, err
}

func (tr *Reader) next() (*Header, error) {
	var paxHdrs map[string]string
	var gnuLongName, gnuLongLink string

	// Externally, Next iterates through the tar archive as if it is a series of
	// files. Internally, the tar format often uses fake "files" to add meta
	// data that describes the next file. These meta data "files" should not
	// normally be visible to the outside. As such, this loop iterates through
	// one or more "header files" until it finds a "normal file".
	format := FormatUSTAR | FormatPAX | FormatGNU
loop:
	for {
		// Discard the remainder of the file and any padding.
		if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
			return nil, err
		}
		if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
			return nil, err
		}
		tr.pad = 0

		hdr, rawHdr, err := tr.readHeader()
		if err != nil {
			return nil, err
		}
		if err := tr.handleRegularFile(hdr); err != nil {
			return nil, err
		}
		format.mayOnlyBe(hdr.Format)

		// Check for PAX/GNU special headers and files.
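		// A PAX extended header's data section holds "%d %s=%s\n" records
		// (e.g., the 30-byte record "30 mtime=1350244992.023960108\n"),
		// which parsePAX below decodes into the paxHdrs map.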
		switch hdr.Typeflag {
		case TypeXHeader, TypeXGlobalHeader:
			format.mayOnlyBe(FormatPAX)
			paxHdrs, err = parsePAX(tr)
			if err != nil {
				return nil, err
			}
			if hdr.Typeflag == TypeXGlobalHeader {
				mergePAX(hdr, paxHdrs)
				return &Header{
					Name:       hdr.Name,
					Typeflag:   hdr.Typeflag,
					Xattrs:     hdr.Xattrs,
					PAXRecords: hdr.PAXRecords,
					Format:     format,
				}, nil
			}
			continue loop // This is a meta header affecting the next header
		case TypeGNULongName, TypeGNULongLink:
			format.mayOnlyBe(FormatGNU)
			realname, err := ioutil.ReadAll(tr)
			if err != nil {
				return nil, err
			}

			var p parser
			switch hdr.Typeflag {
			case TypeGNULongName:
				gnuLongName = p.parseString(realname)
			case TypeGNULongLink:
				gnuLongLink = p.parseString(realname)
			}
			continue loop // This is a meta header affecting the next header
		default:
			// The old GNU sparse format is handled here since it is technically
			// just a regular file with additional attributes.

			if err := mergePAX(hdr, paxHdrs); err != nil {
				return nil, err
			}
			if gnuLongName != "" {
				hdr.Name = gnuLongName
			}
			if gnuLongLink != "" {
				hdr.Linkname = gnuLongLink
			}
			if hdr.Typeflag == TypeRegA {
				if strings.HasSuffix(hdr.Name, "/") {
					hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
				} else {
					hdr.Typeflag = TypeReg
				}
			}

			// The extended headers may have updated the size.
			// Thus, set up the regFileReader again after merging PAX headers.
			if err := tr.handleRegularFile(hdr); err != nil {
				return nil, err
			}

			// Sparse formats rely on being able to read from the logical data
			// section; there must be a preceding call to handleRegularFile.
			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
				return nil, err
			}

			// Set the final guess at the format.
			if format.has(FormatUSTAR) && format.has(FormatPAX) {
				format.mayOnlyBe(FormatUSTAR)
			}
			hdr.Format = format
			return hdr, nil // This is a file, so stop
		}
	}
}

// handleRegularFile sets up the current file reader and padding such that it
// can only read the following logical data section. It will properly handle
// special headers that contain no data section.
func (tr *Reader) handleRegularFile(hdr *Header) error {
	nb := hdr.Size
	if isHeaderOnlyType(hdr.Typeflag) {
		nb = 0
	}
	if nb < 0 {
		return ErrHeader
	}

	tr.pad = blockPadding(nb)
	tr.curr = &regFileReader{r: tr.r, nb: nb}
	return nil
}

// handleSparseFile checks if the current file is a sparse format of any type
// and sets the curr reader appropriately.
func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
	var spd sparseDatas
	var err error
	if hdr.Typeflag == TypeGNUSparse {
		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
	} else {
		spd, err = tr.readGNUSparsePAXHeaders(hdr)
	}

	// If sp is non-nil, then this is a sparse file.
	// Note that it is possible for len(sp) == 0.
	if err == nil && spd != nil {
		if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
			return ErrHeader
		}
		sph := invertSparseEntries(spd, hdr.Size)
		tr.curr = &sparseFileReader{tr.curr, sph, 0}
	}
	return err
}

// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
// If they are found, then this function reads the sparse map and returns it.
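// The format version is determined from the GNU.sparse.major and
// GNU.sparse.minor records, or inferred from the presence of GNU.sparse.map
// for the older 0.x formats.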
// This assumes that 0.0 headers have already been converted to 0.1 headers
// by the PAX header parsing logic.
func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
	// Identify the version of GNU headers.
	var is1x0 bool
	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
	switch {
	case major == "0" && (minor == "0" || minor == "1"):
		is1x0 = false
	case major == "1" && minor == "0":
		is1x0 = true
	case major != "" || minor != "":
		return nil, nil // Unknown GNU sparse PAX version
	case hdr.PAXRecords[paxGNUSparseMap] != "":
		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
	default:
		return nil, nil // Not a PAX format GNU sparse file.
	}
	hdr.Format.mayOnlyBe(FormatPAX)

	// Update hdr from GNU sparse PAX headers.
	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
		hdr.Name = name
	}
	size := hdr.PAXRecords[paxGNUSparseSize]
	if size == "" {
		size = hdr.PAXRecords[paxGNUSparseRealSize]
	}
	if size != "" {
		n, err := strconv.ParseInt(size, 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		hdr.Size = n
	}

	// Read the sparse map according to the appropriate format.
	if is1x0 {
		return readGNUSparseMap1x0(tr.curr)
	}
	return readGNUSparseMap0x1(hdr.PAXRecords)
}

// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
	for k, v := range paxHdrs {
		if v == "" {
			continue // Keep the original USTAR value
		}
		var id64 int64
		switch k {
		case paxPath:
			hdr.Name = v
		case paxLinkpath:
			hdr.Linkname = v
		case paxUname:
			hdr.Uname = v
		case paxGname:
			hdr.Gname = v
		case paxUid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Uid = int(id64) // Integer overflow possible
		case paxGid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Gid = int(id64) // Integer overflow possible
		case paxAtime:
			hdr.AccessTime, err = parsePAXTime(v)
		case paxMtime:
			hdr.ModTime, err = parsePAXTime(v)
		case paxCtime:
			hdr.ChangeTime, err = parsePAXTime(v)
		case paxSize:
			hdr.Size, err = strconv.ParseInt(v, 10, 64)
		default:
			if strings.HasPrefix(k, paxSchilyXattr) {
				if hdr.Xattrs == nil {
					hdr.Xattrs = make(map[string]string)
				}
				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
			}
		}
		if err != nil {
			return ErrHeader
		}
	}
	hdr.PAXRecords = paxHdrs
	return nil
}

// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned.
func parsePAX(r io.Reader) (map[string]string, error) {
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	sbuf := string(buf)

	// For GNU PAX sparse format 0.0 support.
	// This function transforms the sparse format 0.0 headers into format 0.1
	// headers since 0.0 headers were not PAX compliant.
	var sparseMap []string

	paxHdrs := make(map[string]string)
	for len(sbuf) > 0 {
		key, value, residual, err := parsePAXRecord(sbuf)
		if err != nil {
			return nil, ErrHeader
		}
		sbuf = residual

		switch key {
		case paxGNUSparseOffset, paxGNUSparseNumBytes:
			// Validate sparse header order and value.
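			// In the 0.0 format these records alternate as offset/numbytes
			// pairs; a value containing ',' would corrupt the comma-joined
			// 0.1-style map assembled below.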
			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
				strings.Contains(value, ",") {
				return nil, ErrHeader
			}
			sparseMap = append(sparseMap, value)
		default:
			paxHdrs[key] = value
		}
	}
	if len(sparseMap) > 0 {
		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
	}
	return paxHdrs, nil
}

// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary. It returns the raw block of the
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
//	* Exactly 0 bytes are read and EOF is hit.
//	* Exactly 1 block of zeros is read and EOF is hit.
//	* At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
	// Two blocks of zero bytes marks the end of the archive.
	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
		return nil, nil, err // EOF is okay here; exactly 0 bytes read
	}
	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
		}
		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
		}
		return nil, nil, ErrHeader // Zero block and then non-zero block
	}

	// Verify the header matches a known format.
	format := tr.blk.GetFormat()
	if format == FormatUnknown {
		return nil, nil, ErrHeader
	}

	var p parser
	hdr := new(Header)

	// Unpack the V7 header.
	v7 := tr.blk.V7()
	hdr.Typeflag = v7.TypeFlag()[0]
	hdr.Name = p.parseString(v7.Name())
	hdr.Linkname = p.parseString(v7.LinkName())
	hdr.Size = p.parseNumeric(v7.Size())
	hdr.Mode = p.parseNumeric(v7.Mode())
	hdr.Uid = int(p.parseNumeric(v7.UID()))
	hdr.Gid = int(p.parseNumeric(v7.GID()))
	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)

	// Unpack format specific fields.
	if format > formatV7 {
		ustar := tr.blk.USTAR()
		hdr.Uname = p.parseString(ustar.UserName())
		hdr.Gname = p.parseString(ustar.GroupName())
		hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
		hdr.Devminor = p.parseNumeric(ustar.DevMinor())

		var prefix string
		switch {
		case format.has(FormatUSTAR | FormatPAX):
			hdr.Format = format
			ustar := tr.blk.USTAR()
			prefix = p.parseString(ustar.Prefix())

			// For Format detection, check if block is properly formatted since
			// the parser is more liberal than what USTAR actually permits.
			notASCII := func(r rune) bool { return r >= 0x80 }
			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
				hdr.Format = FormatUnknown // Non-ASCII characters in block.
			}
			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
			if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
				nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
				hdr.Format = FormatUnknown // Numeric fields must end in NUL
			}
		case format.has(formatSTAR):
			star := tr.blk.STAR()
			prefix = p.parseString(star.Prefix())
			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
		case format.has(FormatGNU):
			hdr.Format = format
			var p2 parser
			gnu := tr.blk.GNU()
			if b := gnu.AccessTime(); b[0] != 0 {
				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
			}
			if b := gnu.ChangeTime(); b[0] != 0 {
				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
			}

			// Prior to Go1.8, the Writer had a bug where it would output
			// an invalid tar file in certain rare situations because the logic
			// incorrectly believed that the old GNU format had a prefix field.
			// This is wrong and leads to an output file that mangles the
			// atime and ctime fields, which are often left unused.
			//
			// In order to continue reading tar files created by former, buggy
			// versions of Go, we skeptically parse the atime and ctime fields.
			// If we are unable to parse them and the prefix field looks like
			// an ASCII string, then we fall back on the pre-Go1.8 behavior
			// of treating these fields as the USTAR prefix field.
			//
			// Note that this will not use the fallback logic for all possible
			// files generated by a pre-Go1.8 toolchain. If the generated file
			// happened to have a prefix field that parses as valid
			// atime and ctime fields (e.g., when they are valid octal strings),
			// then it is impossible to distinguish between a valid GNU file
			// and an invalid pre-Go1.8 file.
			//
			// See https://golang.org/issues/12594
			// See https://golang.org/issues/21005
			if p2.err != nil {
				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
				ustar := tr.blk.USTAR()
				if s := p.parseString(ustar.Prefix()); isASCII(s) {
					prefix = s
				}
				hdr.Format = FormatUnknown // Buggy file is not GNU
			}
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}
	return hdr, &tr.blk, p.err
}

// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough.
// If it's larger than four entries, then one or more extension headers are used
// to store the rest of the sparse map.
//
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
	// Make sure that the input format is GNU.
	// Unfortunately, the STAR format also has a sparse header format that uses
	// the same type flag but has a completely different layout.
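	// (In GNU tar's layout the header block itself holds up to four sparse
	// entries plus an "isextended" flag; additional entries follow in
	// extension blocks, which the loop below reads while isextended is set.)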
	if blk.GetFormat() != FormatGNU {
		return nil, ErrHeader
	}
	hdr.Format.mayOnlyBe(FormatGNU)

	var p parser
	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
	if p.err != nil {
		return nil, p.err
	}
	s := blk.GNU().Sparse()
	spd := make(sparseDatas, 0, s.MaxEntries())
	for {
		for i := 0; i < s.MaxEntries(); i++ {
			// This termination condition is identical to GNU and BSD tar.
			if s.Entry(i).Offset()[0] == 0x00 {
				break // Don't return, need to process extended headers (even if empty)
			}
			offset := p.parseNumeric(s.Entry(i).Offset())
			length := p.parseNumeric(s.Entry(i).Length())
			if p.err != nil {
				return nil, p.err
			}
			spd = append(spd, sparseEntry{Offset: offset, Length: length})
		}

		if s.IsExtended()[0] > 0 {
			// There are more entries. Read an extension header and parse its entries.
			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
				return nil, err
			}
			s = blk.Sparse()
			continue
		}
		return spd, nil // Done
	}
}

// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, length). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
	var (
		cntNewline int64
		buf        bytes.Buffer
		blk        block
	)

	// feedTokens copies data in blocks from r into buf until there are
	// at least n newlines in buf. It will not read more blocks than needed.
	feedTokens := func(n int64) error {
		for cntNewline < n {
			if _, err := mustReadFull(r, blk[:]); err != nil {
				return err
			}
			buf.Write(blk[:])
			for _, c := range blk {
				if c == '\n' {
					cntNewline++
				}
			}
		}
		return nil
	}

	// nextToken gets the next token delimited by a newline. This assumes that
	// at least one newline exists in the buffer.
	nextToken := func() string {
		cntNewline--
		tok, _ := buf.ReadString('\n')
		return strings.TrimRight(tok, "\n")
	}

	// Parse for the number of entries.
	// Use integer overflow resistant math to check this.
	if err := feedTokens(1); err != nil {
		return nil, err
	}
	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// Parse for all member entries.
	// numEntries is trusted after this since a potential attacker must have
	// committed resources proportional to what this library used.
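	// For example, a map describing two data fragments might be encoded as
	// "2\n0\n8\n1024\n8\n" (NUL-padded to the block boundary); feedTokens
	// buffers the four remaining tokens and the loop below yields the entries
	// {Offset: 0, Length: 8} and {Offset: 1024, Length: 8}.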
	if err := feedTokens(2 * numEntries); err != nil {
		return nil, err
	}
	spd := make(sparseDatas, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
	}
	return spd, nil
}

// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
	// Get number of entries.
	// Use integer overflow resistant math to check this.
	numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// There should be two numbers in sparseMap for each entry.
	sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
	if len(sparseMap) == 1 && sparseMap[0] == "" {
		sparseMap = sparseMap[:0]
	}
	if int64(len(sparseMap)) != 2*numEntries {
		return nil, ErrHeader
	}

	// Loop through the entries in the sparse map.
	// numEntries is trusted now.
	spd := make(sparseDatas, 0, numEntries)
	for len(sparseMap) >= 2 {
		offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
		length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
		sparseMap = sparseMap[2:]
	}
	return spd, nil
}

// Read reads from the current file in the tar archive.
// It returns (0, io.EOF) when it reaches the end of that file,
// until Next is called to advance to the next file.
//
// If the current file is sparse, then the regions marked as a hole
// are read back as NUL-bytes.
//
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (int, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.Read(b)
	if err != nil && err != io.EOF {
		tr.err = err
	}
	return n, err
}

// writeTo writes the content of the current file to w.
// The number of bytes written matches the number of remaining bytes in the
// current file.
//
// If the current file is sparse and w is an io.WriteSeeker,
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are filled with NULs.
// This always writes the last byte to ensure w is the right size.
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tr *Reader) writeTo(w io.Writer) (int64, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.WriteTo(w)
	if err != nil {
		tr.err = err
	}
	return n, err
}

// regFileReader is a fileReader for reading data from a regular file entry.
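// It limits reads to the nb bytes declared by the entry's header and converts
// a premature end of the underlying stream into io.ErrUnexpectedEOF.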
type regFileReader struct {
	r  io.Reader // Underlying Reader
	nb int64     // Number of remaining bytes to read
}

func (fr *regFileReader) Read(b []byte) (n int, err error) {
	if int64(len(b)) > fr.nb {
		b = b[:fr.nb]
	}
	if len(b) > 0 {
		n, err = fr.r.Read(b)
		fr.nb -= int64(n)
	}
	switch {
	case err == io.EOF && fr.nb > 0:
		return n, io.ErrUnexpectedEOF
	case err == nil && fr.nb == 0:
		return n, io.EOF
	default:
		return n, err
	}
}

func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
	return io.Copy(w, struct{ io.Reader }{fr})
}

func (fr regFileReader) LogicalRemaining() int64 {
	return fr.nb
}

func (fr regFileReader) PhysicalRemaining() int64 {
	return fr.nb
}

// sparseFileReader is a fileReader for reading data from a sparse file entry.
type sparseFileReader struct {
	fr  fileReader  // Underlying fileReader
	sp  sparseHoles // Normalized list of sparse holes
	pos int64       // Current position in sparse file
}

func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
	finished := int64(len(b)) >= sr.LogicalRemaining()
	if finished {
		b = b[:sr.LogicalRemaining()]
	}

	b0 := b
	endPos := sr.pos + int64(len(b))
	for endPos > sr.pos && err == nil {
		var nf int // Bytes read in fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
			nf, err = tryReadFull(sr.fr, bf)
		} else { // In a hole fragment
			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
			nf, err = tryReadFull(zeroReader{}, bf)
		}
		b = b[nf:]
		sr.pos += int64(nf)
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	case finished:
		return n, io.EOF
	default:
		return n, nil
	}
}

func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
	ws, ok := w.(io.WriteSeeker)
	if ok {
		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		return io.Copy(w, struct{ io.Reader }{sr})
	}

	var writeLastByte bool
	pos0 := sr.pos
	for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
		var nf int64 // Size of fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			nf = holeStart - sr.pos
			nf, err = io.CopyN(ws, sr.fr, nf)
		} else { // In a hole fragment
			nf = holeEnd - sr.pos
			if sr.PhysicalRemaining() == 0 {
				writeLastByte = true
				nf--
			}
			_, err = ws.Seek(nf, io.SeekCurrent)
		}
		sr.pos += nf
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// write a single byte to ensure the file is the right size.
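	// (Seeking past the current end of w alone would not extend it; the
	// single NUL byte written below forces w to its full logical size.)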
	if writeLastByte && err == nil {
		_, err = ws.Write([]byte{0})
		sr.pos++
	}

	n = sr.pos - pos0
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	default:
		return n, nil
	}
}

func (sr sparseFileReader) LogicalRemaining() int64 {
	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
}
func (sr sparseFileReader) PhysicalRemaining() int64 {
	return sr.fr.PhysicalRemaining()
}

type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

// mustReadFull is like io.ReadFull except it returns
// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
func mustReadFull(r io.Reader, b []byte) (int, error) {
	n, err := tryReadFull(r, b)
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return n, err
}

// tryReadFull is like io.ReadFull except it returns
// io.EOF when it is hit before len(b) bytes are read.
func tryReadFull(r io.Reader, b []byte) (n int, err error) {
	for len(b) > n && err == nil {
		var nn int
		nn, err = r.Read(b[n:])
		n += nn
	}
	if len(b) == n && err == io.EOF {
		err = nil
	}
	return n, err
}

// discard skips n bytes in r, reporting an error if unable to do so.
func discard(r io.Reader, n int64) error {
	// If possible, Seek to the last byte before the end of the data section.
	// Do this because Seek is often lazy about reporting errors; this will mask
	// the fact that the stream may be truncated. We can rely on the
	// io.CopyN done shortly afterwards to trigger any IO errors.
	var seekSkipped int64 // Number of bytes skipped via Seek
	if sr, ok := r.(io.Seeker); ok && n > 1 {
		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
		// io.Seeker, but calling Seek always returns an error and performs
		// no action. Thus, we try an innocent seek to the current position
		// to see if Seek is really supported.
		pos1, err := sr.Seek(0, io.SeekCurrent)
		if pos1 >= 0 && err == nil {
			// Seek seems supported, so perform the real Seek.
			pos2, err := sr.Seek(n-1, io.SeekCurrent)
			if pos2 < 0 || err != nil {
				return err
			}
			seekSkipped = pos2 - pos1
		}
	}

	copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped)
	if err == io.EOF && seekSkipped+copySkipped < n {
		err = io.ErrUnexpectedEOF
	}
	return err
}
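
// exampleIterate is a minimal usage sketch of the Reader API defined above:
// call Next until io.EOF, then stream each entry's data through the Reader
// itself. It is purely illustrative (it is not referenced anywhere in the
// package) and deliberately uses only packages this file already imports.
func exampleIterate(r io.Reader) error {
	tr := NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // End of archive
		}
		if err != nil {
			return err
		}
		if hdr.Size == 0 {
			continue // Header-only entries (e.g., directories) carry no data
		}
		// Reads are capped at hdr.Size; the Reader reports io.EOF at the end
		// of this entry until Next is called again.
		if _, err := io.Copy(ioutil.Discard, tr); err != nil {
			return err
		}
	}
}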