package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"context"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/klauspost/compress/zstd"
	"github.com/sirupsen/logrus"
	exec "golang.org/x/sys/execabs"
)

type (
	// Compression is the state represents if compressed or not.
	Compression int
	// WhiteoutFormat is the format of whiteouts unpacked
	WhiteoutFormat int

	// TarOptions wraps the tar options.
	TarOptions struct {
		// IncludeFiles, when non-empty, restricts packing to these paths
		// (relative to the source directory).
		IncludeFiles []string
		// ExcludePatterns holds .dockerignore-style patterns for entries
		// that are left out of the archive.
		ExcludePatterns []string
		// Compression selects the stream compression used when packing.
		Compression Compression
		// NoLchown disables the chown of unpacked entries.
		NoLchown bool
		// UIDMaps and GIDMaps describe the user-namespace ID mappings
		// applied when packing and unpacking.
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
		// ChownOpts, when set, forces this owner on every unpacked entry.
		ChownOpts *idtools.Identity
		// IncludeSourceDir, when true, includes the source directory itself
		// as an entry of the archive.
		IncludeSourceDir bool
		// WhiteoutFormat is the expected on disk format for whiteout files.
		// This format will be converted to the standard format on pack
		// and from the standard format on unpack.
		WhiteoutFormat WhiteoutFormat
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
		// InUserNS indicates the daemon runs inside a user namespace
		// (affects device creation and whiteout conversion).
		InUserNS bool
	}
)

// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function.
// Also, to facilitate the passing of specific id mappings for untar, an
// Archiver can be created with maps which will then be passed to Untar operations.
type Archiver struct {
	// Untar is the archive-extraction function used by this Archiver.
	Untar func(io.Reader, string, *TarOptions) error
	// IDMapping holds the user-namespace ID mappings passed to Untar.
	IDMapping *idtools.IdentityMapping
}

// NewDefaultArchiver returns a new Archiver without any IdentityMapping
func NewDefaultArchiver() *Archiver {
	return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
}

// breakoutError is used to differentiate errors related to breaking out
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
type breakoutError error

const (
	// Uncompressed represents the uncompressed.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
	// Zstd is zstd compression algorithm.
	Zstd
)

const (
	// AUFSWhiteoutFormat is the default format for whiteouts
	AUFSWhiteoutFormat WhiteoutFormat = iota
	// OverlayWhiteoutFormat formats whiteout according to the overlay
	// standard.
	OverlayWhiteoutFormat
)

// File type bits that Go's archive/tar stopped emitting in Header.Mode as of
// Go 1.9; fillGo18FileTypeBits re-adds them for backward compatibility.
const (
	modeISDIR  = 040000  // Directory
	modeISFIFO = 010000  // FIFO
	modeISREG  = 0100000 // Regular file
	modeISLNK  = 0120000 // Symbolic link
	modeISBLK  = 060000  // Block special file
	modeISCHR  = 020000  // Character special file
	modeISSOCK = 0140000 // Socket
)

// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := DecompressStream(file)
	if err != nil {
		return false
	}
	defer rdr.Close()
	r := tar.NewReader(rdr)
	// Only a successfully parsed first tar header qualifies the file.
	_, err = r.Next()
	return err == nil
}

const (
	// Skippable-frame magic range per the zstd framing spec:
	// 0x184D2A50 through 0x184D2A5F.
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

// Leading magic bytes used to sniff the compression format of a stream.
var (
	bzip2Magic = []byte{0x42, 0x5A, 0x68}
	gzipMagic  = []byte{0x1F, 0x8B, 0x08}
	xzMagic    = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}
	zstdMagic  = []byte{0x28, 0xb5, 0x2f, 0xfd}
)

// matcher reports whether the given header bytes identify a format.
type matcher = func([]byte) bool

// magicNumberMatcher returns a matcher that checks for the fixed prefix m.
func magicNumberMatcher(m []byte) matcher {
	return func(source []byte) bool {
		return bytes.HasPrefix(source, m)
	}
}

// zstdMatcher detects zstd compression algorithm.
// Zstandard compressed data is made of one or more frames.
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
func zstdMatcher() matcher {
	return func(source []byte) bool {
		if bytes.HasPrefix(source, zstdMagic) {
			// Zstandard frame
			return true
		}
		// skippable frame
		if len(source) < 8 {
			return false
		}
		// magic number from 0x184D2A50 to 0x184D2A5F.
		if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
			return true
		}
		return false
	}
}

// DetectCompression detects the compression algorithm of the source.
171 func DetectCompression(source []byte) Compression { 172 compressionMap := map[Compression]matcher{ 173 Bzip2: magicNumberMatcher(bzip2Magic), 174 Gzip: magicNumberMatcher(gzipMagic), 175 Xz: magicNumberMatcher(xzMagic), 176 Zstd: zstdMatcher(), 177 } 178 for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { 179 fn := compressionMap[compression] 180 if fn(source) { 181 return compression 182 } 183 } 184 return Uncompressed 185 } 186 187 func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { 188 args := []string{"xz", "-d", "-c", "-q"} 189 190 return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) 191 } 192 193 func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { 194 if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { 195 noPigz, err := strconv.ParseBool(noPigzEnv) 196 if err != nil { 197 logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") 198 } 199 if noPigz { 200 logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) 201 return gzip.NewReader(buf) 202 } 203 } 204 205 unpigzPath, err := exec.LookPath("unpigz") 206 if err != nil { 207 logrus.Debugf("unpigz binary not found, falling back to go gzip library") 208 return gzip.NewReader(buf) 209 } 210 211 logrus.Debugf("Using %s to decompress", unpigzPath) 212 213 return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) 214 } 215 216 func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { 217 return ioutils.NewReadCloserWrapper(readBuf, func() error { 218 cancel() 219 return readBuf.Close() 220 }) 221 } 222 223 // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
// DecompressStream sniffs the compression format from the stream's leading
// bytes and returns a reader that yields the decompressed archive. Closing
// the returned reader releases the pooled buffer and, for external
// decompressors, terminates the child process.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	// Peek at enough bytes to cover the longest magic number we sniff for.
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue 18170
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		// The gzip path may shell out to unpigz; the context lets Close
		// terminate that child process.
		ctx, cancel := context.WithCancel(context.Background())

		gzReader, err := gzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		// xz decompression always runs as an external process; see xzDecompress.
		ctx, cancel := context.WithCancel(context.Background())

		xzReader, err := xzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	case Zstd:
		zstdReader, err := zstd.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader)
		return readBufWrapper, nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

// CompressStream compresses the dest with specified compression algorithm.
280 func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { 281 p := pools.BufioWriter32KPool 282 buf := p.Get(dest) 283 switch compression { 284 case Uncompressed: 285 writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) 286 return writeBufWrapper, nil 287 case Gzip: 288 gzWriter := gzip.NewWriter(dest) 289 writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) 290 return writeBufWrapper, nil 291 case Bzip2, Xz: 292 // archive/bzip2 does not support writing, and there is no xz support at all 293 // However, this is not a problem as docker only currently generates gzipped tars 294 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 295 default: 296 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 297 } 298 } 299 300 // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to 301 // modify the contents or header of an entry in the archive. If the file already 302 // exists in the archive the TarModifierFunc will be called with the Header and 303 // a reader which will return the files content. If the file does not exist both 304 // header and content will be nil. 305 type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) 306 307 // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the 308 // tar stream are modified if they match any of the keys in mods. 
// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in
// the tar stream are modified if they match any of the keys in mods. Rewriting
// happens in a background goroutine; errors are surfaced to the reader of the
// returned pipe via CloseWithError.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)
		defer inputTarStream.Close()
		defer tarWriter.Close()

		// modify runs one TarModifierFunc and writes its result. A nil
		// returned header means "drop this entry".
		modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
			header, data, err := modifier(name, original, tarReader)
			switch {
			case err != nil:
				return err
			case header == nil:
				return nil
			}

			if header.Name == "" {
				header.Name = name
			}
			// Size must reflect the replacement content for the tar framing
			// to stay consistent.
			header.Size = int64(len(data))
			if err := tarWriter.WriteHeader(header); err != nil {
				return err
			}
			if len(data) != 0 {
				if _, err := tarWriter.Write(data); err != nil {
					return err
				}
			}
			return nil
		}

		var err error
		var originalHeader *tar.Header
		for {
			originalHeader, err = tarReader.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			modifier, ok := mods[originalHeader.Name]
			if !ok {
				// No modifiers for this file, copy the header and data
				if err := tarWriter.WriteHeader(originalHeader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				if _, err := pools.Copy(tarWriter, tarReader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				continue
			}
			// Each modifier is applied at most once; leftovers are handled
			// after the input stream is exhausted.
			delete(mods, originalHeader.Name)

			if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		// Apply the modifiers that haven't matched any files in the archive
		for name, modifier := range mods {
			if err := modify(name, nil, modifier, nil); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		pipeWriter.Close()

	}()
	return pipeReader
}
Extension returns the extension of a file that uses the specified compression algorithm. 390 func (compression *Compression) Extension() string { 391 switch *compression { 392 case Uncompressed: 393 return "tar" 394 case Bzip2: 395 return "tar.bz2" 396 case Gzip: 397 return "tar.gz" 398 case Xz: 399 return "tar.xz" 400 case Zstd: 401 return "tar.zst" 402 } 403 return "" 404 } 405 406 // FileInfoHeader creates a populated Header from fi. 407 // Compared to archive pkg this function fills in more information. 408 // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), 409 // which have been deleted since Go 1.9 archive/tar. 410 func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { 411 hdr, err := tar.FileInfoHeader(fi, link) 412 if err != nil { 413 return nil, err 414 } 415 hdr.Format = tar.FormatPAX 416 hdr.ModTime = hdr.ModTime.Truncate(time.Second) 417 hdr.AccessTime = time.Time{} 418 hdr.ChangeTime = time.Time{} 419 hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) 420 hdr.Name = canonicalTarName(name, fi.IsDir()) 421 if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { 422 return nil, err 423 } 424 return hdr, nil 425 } 426 427 // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar 428 // https://github.com/golang/go/commit/66b5a2f 429 func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { 430 fm := fi.Mode() 431 switch { 432 case fm.IsRegular(): 433 mode |= modeISREG 434 case fi.IsDir(): 435 mode |= modeISDIR 436 case fm&os.ModeSymlink != 0: 437 mode |= modeISLNK 438 case fm&os.ModeDevice != 0: 439 if fm&os.ModeCharDevice != 0 { 440 mode |= modeISCHR 441 } else { 442 mode |= modeISBLK 443 } 444 case fm&os.ModeNamedPipe != 0: 445 mode |= modeISFIFO 446 case fm&os.ModeSocket != 0: 447 mode |= modeISSOCK 448 } 449 return mode 450 } 451 452 // ReadSecurityXattrToTarHeader reads security.capability xattr 
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
	const (
		// Values based on linux/include/uapi/linux/capability.h
		xattrCapsSz2    = 20
		versionOffset   = 3
		vfsCapRevision2 = 2
		vfsCapRevision3 = 3
	)
	// NOTE(review): the Lgetxattr error is deliberately discarded here, so a
	// failed read is treated the same as "no capability xattr" — confirm this
	// best-effort behavior is intended on all supported platforms.
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		length := len(capability)
		if capability[versionOffset] == vfsCapRevision3 {
			// Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
			// sense outside the user namespace the archive is built in.
			capability[versionOffset] = vfsCapRevision2
			length = xattrCapsSz2
		}
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability[:length])
	}
	return nil
}

// tarWhiteoutConverter rewrites whiteout entries between the on-disk format
// and the AUFS tar format, on pack (ConvertWrite) and unpack (ConvertRead).
type tarWhiteoutConverter interface {
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	ConvertRead(*tar.Header, string) (bool, error)
}

// tarAppender streams individual files into a tar writer, tracking hardlinks
// and applying ID mappings and whiteout conversion along the way.
type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles       map[uint64]string
	IdentityMapping *idtools.IdentityMapping
	ChownOpts       *idtools.Identity

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}

// newTarAppender returns a tarAppender writing to writer with the given ID
// mapping and optional forced ownership. The 32K buffer comes from the shared
// pool and must be returned by the caller when the appender is done.
func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
	return &tarAppender{
		SeenFiles:       make(map[uint64]string),
		TarWriter:       tar.NewWriter(writer),
		Buffer:          pools.BufioWriter32KPool.Get(nil),
		IdentityMapping: idMapping,
		ChownOpts:       chownOpts,
	}
}

// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
510 func canonicalTarName(name string, isDir bool) string { 511 name = CanonicalTarNameForPath(name) 512 513 // suffix with '/' for directories 514 if isDir && !strings.HasSuffix(name, "/") { 515 name += "/" 516 } 517 return name 518 } 519 520 // addTarFile adds to the tar archive a file from `path` as `name` 521 func (ta *tarAppender) addTarFile(path, name string) error { 522 fi, err := os.Lstat(path) 523 if err != nil { 524 return err 525 } 526 527 var link string 528 if fi.Mode()&os.ModeSymlink != 0 { 529 var err error 530 link, err = os.Readlink(path) 531 if err != nil { 532 return err 533 } 534 } 535 536 hdr, err := FileInfoHeader(name, fi, link) 537 if err != nil { 538 return err 539 } 540 if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { 541 return err 542 } 543 544 // if it's not a directory and has more than 1 link, 545 // it's hard linked, so set the type flag accordingly 546 if !fi.IsDir() && hasHardlinks(fi) { 547 inode, err := getInodeFromStat(fi.Sys()) 548 if err != nil { 549 return err 550 } 551 // a link should have a name that it links too 552 // and that linked name should be first in the tar archive 553 if oldpath, ok := ta.SeenFiles[inode]; ok { 554 hdr.Typeflag = tar.TypeLink 555 hdr.Linkname = oldpath 556 hdr.Size = 0 // This Must be here for the writer math to add up! 557 } else { 558 ta.SeenFiles[inode] = name 559 } 560 } 561 562 // check whether the file is overlayfs whiteout 563 // if yes, skip re-mapping container ID mappings. 564 isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 565 566 // handle re-mapping container ID mappings back to host ID mappings before 567 // writing tar headers/files. 
We skip whiteout files because they were written 568 // by the kernel and already have proper ownership relative to the host 569 if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { 570 fileIDPair, err := getFileUIDGID(fi.Sys()) 571 if err != nil { 572 return err 573 } 574 hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) 575 if err != nil { 576 return err 577 } 578 } 579 580 // explicitly override with ChownOpts 581 if ta.ChownOpts != nil { 582 hdr.Uid = ta.ChownOpts.UID 583 hdr.Gid = ta.ChownOpts.GID 584 } 585 586 if ta.WhiteoutConverter != nil { 587 wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) 588 if err != nil { 589 return err 590 } 591 592 // If a new whiteout file exists, write original hdr, then 593 // replace hdr with wo to be written after. Whiteouts should 594 // always be written after the original. Note the original 595 // hdr may have been updated to be a whiteout with returning 596 // a whiteout header 597 if wo != nil { 598 if err := ta.TarWriter.WriteHeader(hdr); err != nil { 599 return err 600 } 601 if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { 602 return fmt.Errorf("tar: cannot use whiteout for non-empty file") 603 } 604 hdr = wo 605 } 606 } 607 608 if err := ta.TarWriter.WriteHeader(hdr); err != nil { 609 return err 610 } 611 612 if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { 613 // We use system.OpenSequential to ensure we use sequential file 614 // access on Windows to avoid depleting the standby list. 615 // On Linux, this equates to a regular os.Open. 
616 file, err := system.OpenSequential(path) 617 if err != nil { 618 return err 619 } 620 621 ta.Buffer.Reset(ta.TarWriter) 622 defer ta.Buffer.Reset(nil) 623 _, err = io.Copy(ta.Buffer, file) 624 file.Close() 625 if err != nil { 626 return err 627 } 628 err = ta.Buffer.Flush() 629 if err != nil { 630 return err 631 } 632 } 633 634 return nil 635 } 636 637 func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { 638 // hdr.Mode is in linux format, which we can use for sycalls, 639 // but for os.Foo() calls we need the mode converted to os.FileMode, 640 // so use hdrInfo.Mode() (they differ for e.g. setuid bits) 641 hdrInfo := hdr.FileInfo() 642 643 switch hdr.Typeflag { 644 case tar.TypeDir: 645 // Create directory unless it exists as a directory already. 646 // In that case we just want to merge the two 647 if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { 648 if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { 649 return err 650 } 651 } 652 653 case tar.TypeReg, tar.TypeRegA: 654 // Source is regular file. We use system.OpenFileSequential to use sequential 655 // file access to avoid depleting the standby list on Windows. 
656 // On Linux, this equates to a regular os.OpenFile 657 file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) 658 if err != nil { 659 return err 660 } 661 if _, err := io.Copy(file, reader); err != nil { 662 file.Close() 663 return err 664 } 665 file.Close() 666 667 case tar.TypeBlock, tar.TypeChar: 668 if inUserns { // cannot create devices in a userns 669 return nil 670 } 671 // Handle this is an OS-specific way 672 if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { 673 return err 674 } 675 676 case tar.TypeFifo: 677 // Handle this is an OS-specific way 678 if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { 679 return err 680 } 681 682 case tar.TypeLink: 683 targetPath := filepath.Join(extractDir, hdr.Linkname) 684 // check for hardlink breakout 685 if !strings.HasPrefix(targetPath, extractDir) { 686 return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) 687 } 688 if err := os.Link(targetPath, path); err != nil { 689 return err 690 } 691 692 case tar.TypeSymlink: 693 // path -> hdr.Linkname = targetPath 694 // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file 695 targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) 696 697 // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because 698 // that symlink would first have to be created, which would be caught earlier, at this very check: 699 if !strings.HasPrefix(targetPath, extractDir) { 700 return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) 701 } 702 if err := os.Symlink(hdr.Linkname, path); err != nil { 703 return err 704 } 705 706 case tar.TypeXGlobalHeader: 707 logrus.Debug("PAX Global Extended Headers found and ignored") 708 return nil 709 710 default: 711 return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) 712 } 713 714 // Lchown is not supported on Windows. 
715 if Lchown && runtime.GOOS != "windows" { 716 if chownOpts == nil { 717 chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} 718 } 719 if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { 720 return err 721 } 722 } 723 724 var errors []string 725 for key, value := range hdr.Xattrs { 726 if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { 727 if err == syscall.ENOTSUP || err == syscall.EPERM { 728 // We ignore errors here because not all graphdrivers support 729 // xattrs *cough* old versions of AUFS *cough*. However only 730 // ENOTSUP should be emitted in that case, otherwise we still 731 // bail. 732 // EPERM occurs if modifying xattrs is not allowed. This can 733 // happen when running in userns with restrictions (ChromeOS). 734 errors = append(errors, err.Error()) 735 continue 736 } 737 return err 738 } 739 740 } 741 742 if len(errors) > 0 { 743 logrus.WithFields(logrus.Fields{ 744 "errors": errors, 745 }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") 746 } 747 748 // There is no LChmod, so ignore mode for symlink. Also, this 749 // must happen after chown, as that can modify the file mode 750 if err := handleLChmod(hdr, path, hdrInfo); err != nil { 751 return err 752 } 753 754 aTime := hdr.AccessTime 755 if aTime.Before(hdr.ModTime) { 756 // Last access time should never be before last modified time. 
757 aTime = hdr.ModTime 758 } 759 760 // system.Chtimes doesn't support a NOFOLLOW flag atm 761 if hdr.Typeflag == tar.TypeLink { 762 if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { 763 if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { 764 return err 765 } 766 } 767 } else if hdr.Typeflag != tar.TypeSymlink { 768 if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { 769 return err 770 } 771 } else { 772 ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} 773 if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { 774 return err 775 } 776 } 777 return nil 778 } 779 780 // Tar creates an archive from the directory at `path`, and returns it as a 781 // stream of bytes. 782 func Tar(path string, compression Compression) (io.ReadCloser, error) { 783 return TarWithOptions(path, &TarOptions{Compression: compression}) 784 } 785 786 // TarWithOptions creates an archive from the directory at `path`, only including files whose relative 787 // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. 788 func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { 789 790 // Fix the source path to work with long path names. This is a no-op 791 // on platforms other than Windows. 
792 srcPath = fixVolumePathPrefix(srcPath) 793 794 pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) 795 if err != nil { 796 return nil, err 797 } 798 799 pipeReader, pipeWriter := io.Pipe() 800 801 compressWriter, err := CompressStream(pipeWriter, options.Compression) 802 if err != nil { 803 return nil, err 804 } 805 806 whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) 807 if err != nil { 808 return nil, err 809 } 810 811 go func() { 812 ta := newTarAppender( 813 idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), 814 compressWriter, 815 options.ChownOpts, 816 ) 817 ta.WhiteoutConverter = whiteoutConverter 818 819 defer func() { 820 // Make sure to check the error on Close. 821 if err := ta.TarWriter.Close(); err != nil { 822 logrus.Errorf("Can't close tar writer: %s", err) 823 } 824 if err := compressWriter.Close(); err != nil { 825 logrus.Errorf("Can't close compress writer: %s", err) 826 } 827 if err := pipeWriter.Close(); err != nil { 828 logrus.Errorf("Can't close pipe writer: %s", err) 829 } 830 }() 831 832 // this buffer is needed for the duration of this piped stream 833 defer pools.BufioWriter32KPool.Put(ta.Buffer) 834 835 // In general we log errors here but ignore them because 836 // during e.g. a diff operation the container can continue 837 // mutating the filesystem and we can see transient errors 838 // from this 839 840 stat, err := os.Lstat(srcPath) 841 if err != nil { 842 return 843 } 844 845 if !stat.IsDir() { 846 // We can't later join a non-dir with any includes because the 847 // 'walk' will error if "file/." is stat-ed and "file" is not a 848 // directory. So, we must split the source path and use the 849 // basename as the include. 
850 if len(options.IncludeFiles) > 0 { 851 logrus.Warn("Tar: Can't archive a file with includes") 852 } 853 854 dir, base := SplitPathDirEntry(srcPath) 855 srcPath = dir 856 options.IncludeFiles = []string{base} 857 } 858 859 if len(options.IncludeFiles) == 0 { 860 options.IncludeFiles = []string{"."} 861 } 862 863 seen := make(map[string]bool) 864 865 for _, include := range options.IncludeFiles { 866 rebaseName := options.RebaseNames[include] 867 868 var ( 869 parentMatchInfo []fileutils.MatchInfo 870 parentDirs []string 871 ) 872 873 walkRoot := getWalkRoot(srcPath, include) 874 filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { 875 if err != nil { 876 logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) 877 return nil 878 } 879 880 relFilePath, err := filepath.Rel(srcPath, filePath) 881 if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { 882 // Error getting relative path OR we are looking 883 // at the source directory path. Skip in both situations. 884 return nil 885 } 886 887 if options.IncludeSourceDir && include == "." && relFilePath != "." { 888 relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) 889 } 890 891 skip := false 892 893 // If "include" is an exact match for the current file 894 // then even if there's an "excludePatterns" pattern that 895 // matches it, don't skip it. 
IOW, assume an explicit 'include' 896 // is asking for that file no matter what - which is true 897 // for some files, like .dockerignore and Dockerfile (sometimes) 898 if include != relFilePath { 899 for len(parentDirs) != 0 { 900 lastParentDir := parentDirs[len(parentDirs)-1] 901 if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { 902 break 903 } 904 parentDirs = parentDirs[:len(parentDirs)-1] 905 parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] 906 } 907 908 var matchInfo fileutils.MatchInfo 909 if len(parentMatchInfo) != 0 { 910 skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) 911 } else { 912 skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, fileutils.MatchInfo{}) 913 } 914 if err != nil { 915 logrus.Errorf("Error matching %s: %v", relFilePath, err) 916 return err 917 } 918 919 if f.IsDir() { 920 parentDirs = append(parentDirs, relFilePath) 921 parentMatchInfo = append(parentMatchInfo, matchInfo) 922 } 923 } 924 925 if skip { 926 // If we want to skip this file and its a directory 927 // then we should first check to see if there's an 928 // excludes pattern (e.g. !dir/file) that starts with this 929 // dir. If so then we can't skip this dir. 930 931 // Its not a dir then so we can just return/skip. 932 if !f.IsDir() { 933 return nil 934 } 935 936 // No exceptions (!...) in patterns so just skip dir 937 if !pm.Exclusions() { 938 return filepath.SkipDir 939 } 940 941 dirSlash := relFilePath + string(filepath.Separator) 942 943 for _, pat := range pm.Patterns() { 944 if !pat.Exclusion() { 945 continue 946 } 947 if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { 948 // found a match - so can't skip this dir 949 return nil 950 } 951 } 952 953 // No matching exclusion dir so just skip dir 954 return filepath.SkipDir 955 } 956 957 if seen[relFilePath] { 958 return nil 959 } 960 seen[relFilePath] = true 961 962 // Rename the base resource. 
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					// Only the first occurrence of the include prefix is
					// rewritten, preserving any deeper path components.
					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				// Other errors are logged but do not abort the walk.
				return nil
			})
		}
	}()

	return pipeReader, nil
}

// Unpack unpacks the decompressedArchive to dest with options.
//
// The input must already be an uncompressed tar stream. Entries are
// re-owned according to options.UIDMaps/GIDMaps, whiteout files are
// converted from the configured WhiteoutFormat, and any entry that would
// resolve outside dest is rejected with a breakoutError.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	// Pooled 32 KiB buffered reader, reset per entry below, to avoid
	// allocating a buffer for every file in the archive.
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	// Directory headers are collected so their mtimes can be applied last.
	var dirs []*tar.Header
	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
	rootIDs := idMapping.RootPair()
	whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
	// The label lets the exclude-pattern check below skip to the next entry.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// ignore XGlobalHeader early to avoid creating parent directories for them
		if hdr.Typeflag == tar.TypeXGlobalHeader {
			logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
			continue
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		// Exclusion is a plain prefix match against the cleaned entry name.
		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		// Breakout guard: a relative path starting with "../" means the
		// entry would land outside dest.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		// Rebind the pooled buffer onto the current entry's data.
		trBuf.Reset(tr)

		// Translate the archive's UID/GID into host IDs before creating the file.
		if err := remapIDs(idMapping, hdr); err != nil {
			return err
		}

		if whiteoutConverter != nil {
			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
			if err != nil {
				return err
			}
			// The converter may fully consume the entry (e.g. by creating a
			// device node or xattr) in which case nothing is written here.
			if !writeFile {
				continue
			}
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	// Second pass: restore directory timestamps now that their contents exist.
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)

		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
1133 func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { 1134 return untarHandler(tarArchive, dest, options, false) 1135 } 1136 1137 // Handler for teasing out the automatic decompression 1138 func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { 1139 if tarArchive == nil { 1140 return fmt.Errorf("Empty archive") 1141 } 1142 dest = filepath.Clean(dest) 1143 if options == nil { 1144 options = &TarOptions{} 1145 } 1146 if options.ExcludePatterns == nil { 1147 options.ExcludePatterns = []string{} 1148 } 1149 1150 r := tarArchive 1151 if decompress { 1152 decompressedArchive, err := DecompressStream(tarArchive) 1153 if err != nil { 1154 return err 1155 } 1156 defer decompressedArchive.Close() 1157 r = decompressedArchive 1158 } 1159 1160 return Unpack(r, dest, options) 1161 } 1162 1163 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. 1164 // If either Tar or Untar fails, TarUntar aborts and returns the error. 1165 func (archiver *Archiver) TarUntar(src, dst string) error { 1166 archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) 1167 if err != nil { 1168 return err 1169 } 1170 defer archive.Close() 1171 options := &TarOptions{ 1172 UIDMaps: archiver.IDMapping.UIDs(), 1173 GIDMaps: archiver.IDMapping.GIDs(), 1174 } 1175 return archiver.Untar(archive, dst, options) 1176 } 1177 1178 // UntarPath untar a file from path to a destination, src is the source tar file path. 
func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	// Unpack with this archiver's identity mappings so ownership is
	// translated the same way as in its other Untar operations.
	options := &TarOptions{
		UIDMaps: archiver.IDMapping.UIDs(),
		GIDMaps: archiver.IDMapping.GIDs(),
	}
	return archiver.Untar(archive, dst, options)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		// Single regular file: take the one-file path instead of a full walk.
		return archiver.CopyFileWithTar(src, dst)
	}

	// if this Archiver is set up with ID mapping we need to create
	// the new destination directory with the remapped root UID/GID pair
	// as owner
	rootIDs := archiver.IDMapping.RootPair()
	// Create dst, copy src's content into it
	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
		return err
	}
	return archiver.TarUntar(src, dst)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}

	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}

	// Clean up the trailing slash. This must be done in an operating
	// system specific manner.
	if dst[len(dst)-1] == os.PathSeparator {
		// A trailing separator means dst names a directory: copy the file
		// into it under src's base name.
		dst = filepath.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
		return err
	}

	// Producer goroutine writes a single-entry tar stream into w; the
	// consumer below untars it from r. errC is buffered so the producer
	// never blocks on sending its result.
	r, w := io.Pipe()
	errC := make(chan error, 1)

	go func() {
		defer close(errC)

		errC <- func() error {
			// Closing w unblocks the reader side on producer failure too.
			defer w.Close()

			srcF, err := os.Open(src)
			if err != nil {
				return err
			}
			defer srcF.Close()

			hdr, err := tar.FileInfoHeader(srcSt, "")
			if err != nil {
				return err
			}
			hdr.Format = tar.FormatPAX
			// Second-granularity mtime; access/change times are zeroed
			// (presumably for deterministic output — NOTE(review): confirm).
			hdr.ModTime = hdr.ModTime.Truncate(time.Second)
			hdr.AccessTime = time.Time{}
			hdr.ChangeTime = time.Time{}
			// The entry is named after the destination, not the source.
			hdr.Name = filepath.Base(dst)
			hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

			if err := remapIDs(archiver.IDMapping, hdr); err != nil {
				return err
			}

			tw := tar.NewWriter(w)
			defer tw.Close()
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			if _, err := io.Copy(tw, srcF); err != nil {
				return err
			}
			return nil
		}()
	}()
	// Propagate the producer's error unless the consumer already failed;
	// runs after Untar returns (named result `err`).
	defer func() {
		if er := <-errC; err == nil && er != nil {
			err = er
		}
	}()

	err = archiver.Untar(r, filepath.Dir(dst), nil)
	if err != nil {
		// Tear down the pipe so the producer goroutine does not block forever.
		r.CloseWithError(err)
	}
	return err
}

// IdentityMapping returns the IdentityMapping of the archiver.
func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
	return archiver.IDMapping
}

// remapIDs rewrites hdr's UID/GID in place to their host-side values
// according to idMapping, returning any translation error.
func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
	ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
	hdr.Uid, hdr.Gid = ids.UID, ids.GID
	return err
}

// cmdStream executes a command, and returns its stdout as a stream.
1305 // If the command fails to run or doesn't complete successfully, an error 1306 // will be returned, including anything written on stderr. 1307 func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { 1308 cmd.Stdin = input 1309 pipeR, pipeW := io.Pipe() 1310 cmd.Stdout = pipeW 1311 var errBuf bytes.Buffer 1312 cmd.Stderr = &errBuf 1313 1314 // Run the command and return the pipe 1315 if err := cmd.Start(); err != nil { 1316 return nil, err 1317 } 1318 1319 // Ensure the command has exited before we clean anything up 1320 done := make(chan struct{}) 1321 1322 // Copy stdout to the returned pipe 1323 go func() { 1324 if err := cmd.Wait(); err != nil { 1325 pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) 1326 } else { 1327 pipeW.Close() 1328 } 1329 close(done) 1330 }() 1331 1332 return ioutils.NewReadCloserWrapper(pipeR, func() error { 1333 // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as 1334 // cmd.Wait waits for any non-file stdout/stderr/stdin to close. 1335 err := pipeR.Close() 1336 <-done 1337 return err 1338 }), nil 1339 } 1340 1341 // NewTempArchive reads the content of src into a temporary file, and returns the contents 1342 // of that file as an archive. The archive can only be read once - as soon as reading completes, 1343 // the file will be deleted. 1344 func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { 1345 f, err := os.CreateTemp(dir, "") 1346 if err != nil { 1347 return nil, err 1348 } 1349 if _, err := io.Copy(f, src); err != nil { 1350 return nil, err 1351 } 1352 if _, err := f.Seek(0, 0); err != nil { 1353 return nil, err 1354 } 1355 st, err := f.Stat() 1356 if err != nil { 1357 return nil, err 1358 } 1359 size := st.Size() 1360 return &TempArchive{File: f, Size: size}, nil 1361 } 1362 1363 // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, 1364 // the file will be deleted. 
1365 type TempArchive struct { 1366 *os.File 1367 Size int64 // Pre-computed from Stat().Size() as a convenience 1368 read int64 1369 closed bool 1370 } 1371 1372 // Close closes the underlying file if it's still open, or does a no-op 1373 // to allow callers to try to close the TempArchive multiple times safely. 1374 func (archive *TempArchive) Close() error { 1375 if archive.closed { 1376 return nil 1377 } 1378 1379 archive.closed = true 1380 1381 return archive.File.Close() 1382 } 1383 1384 func (archive *TempArchive) Read(data []byte) (int, error) { 1385 n, err := archive.File.Read(data) 1386 archive.read += int64(n) 1387 if err != nil || archive.read == archive.Size { 1388 archive.Close() 1389 os.Remove(archive.File.Name()) 1390 } 1391 return n, err 1392 }