github.com/adityamillind98/moby@v23.0.0-rc.4+incompatible/pkg/archive/archive.go (about) 1 package archive // import "github.com/docker/docker/pkg/archive" 2 3 import ( 4 "archive/tar" 5 "bufio" 6 "bytes" 7 "compress/bzip2" 8 "compress/gzip" 9 "context" 10 "encoding/binary" 11 "fmt" 12 "io" 13 "os" 14 "path/filepath" 15 "runtime" 16 "strconv" 17 "strings" 18 "syscall" 19 "time" 20 21 "github.com/containerd/containerd/pkg/userns" 22 "github.com/docker/docker/pkg/idtools" 23 "github.com/docker/docker/pkg/ioutils" 24 "github.com/docker/docker/pkg/pools" 25 "github.com/docker/docker/pkg/system" 26 "github.com/klauspost/compress/zstd" 27 "github.com/moby/patternmatcher" 28 "github.com/moby/sys/sequential" 29 "github.com/pkg/errors" 30 "github.com/sirupsen/logrus" 31 exec "golang.org/x/sys/execabs" 32 ) 33 34 // ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a 35 // tar, but that do not have their own header entry. 36 // 37 // The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not 38 // proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and 39 // a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. 40 // 41 // This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is 42 // subject to change in Moby at any time -- image authors who require consistent or known directory permissions 43 // should explicitly control them by ensuring that header entries exist for any applicable path. 44 const ImpliedDirectoryMode = 0755 45 46 type ( 47 // Compression is the state represents if compressed or not. 48 Compression int 49 // WhiteoutFormat is the format of whiteouts unpacked 50 WhiteoutFormat int 51 52 // TarOptions wraps the tar options. 53 TarOptions struct { 54 IncludeFiles []string 55 ExcludePatterns []string 56 Compression Compression 57 NoLchown bool 58 IDMap idtools.IdentityMapping 59 ChownOpts *idtools.Identity 60 IncludeSourceDir bool 61 // WhiteoutFormat is the expected on disk format for whiteout files. 62 // This format will be converted to the standard format on pack 63 // and from the standard format on unpack. 64 WhiteoutFormat WhiteoutFormat 65 // When unpacking, specifies whether overwriting a directory with a 66 // non-directory is allowed and vice versa. 67 NoOverwriteDirNonDir bool 68 // For each include when creating an archive, the included name will be 69 // replaced with the matching name from this map. 70 RebaseNames map[string]string 71 InUserNS bool 72 } 73 ) 74 75 // Archiver implements the Archiver interface and allows the reuse of most utility functions of 76 // this package with a pluggable Untar function. Also, to facilitate the passing of specific id 77 // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. 78 type Archiver struct { 79 Untar func(io.Reader, string, *TarOptions) error 80 IDMapping idtools.IdentityMapping 81 } 82 83 // NewDefaultArchiver returns a new Archiver without any IdentityMapping 84 func NewDefaultArchiver() *Archiver { 85 return &Archiver{Untar: Untar} 86 } 87 88 // breakoutError is used to differentiate errors related to breaking out 89 // When testing archive breakout in the unit tests, this error is expected 90 // in order for the test to pass. 91 type breakoutError error 92 93 const ( 94 // Uncompressed represents the uncompressed. 95 Uncompressed Compression = iota 96 // Bzip2 is bzip2 compression algorithm. 97 Bzip2 98 // Gzip is gzip compression algorithm. 99 Gzip 100 // Xz is xz compression algorithm. 101 Xz 102 // Zstd is zstd compression algorithm. 103 Zstd 104 ) 105 106 const ( 107 // AUFSWhiteoutFormat is the default format for whiteouts 108 AUFSWhiteoutFormat WhiteoutFormat = iota 109 // OverlayWhiteoutFormat formats whiteout according to the overlay 110 // standard. 111 OverlayWhiteoutFormat 112 ) 113 114 const ( 115 modeISDIR = 040000 // Directory 116 modeISFIFO = 010000 // FIFO 117 modeISREG = 0100000 // Regular file 118 modeISLNK = 0120000 // Symbolic link 119 modeISBLK = 060000 // Block special file 120 modeISCHR = 020000 // Character special file 121 modeISSOCK = 0140000 // Socket 122 ) 123 124 // IsArchivePath checks if the (possibly compressed) file at the given path 125 // starts with a tar file header. 126 func IsArchivePath(path string) bool { 127 file, err := os.Open(path) 128 if err != nil { 129 return false 130 } 131 defer file.Close() 132 rdr, err := DecompressStream(file) 133 if err != nil { 134 return false 135 } 136 defer rdr.Close() 137 r := tar.NewReader(rdr) 138 _, err = r.Next() 139 return err == nil 140 } 141 142 const ( 143 zstdMagicSkippableStart = 0x184D2A50 144 zstdMagicSkippableMask = 0xFFFFFFF0 145 ) 146 147 var ( 148 bzip2Magic = []byte{0x42, 0x5A, 0x68} 149 gzipMagic = []byte{0x1F, 0x8B, 0x08} 150 xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} 151 zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} 152 ) 153 154 type matcher = func([]byte) bool 155 156 func magicNumberMatcher(m []byte) matcher { 157 return func(source []byte) bool { 158 return bytes.HasPrefix(source, m) 159 } 160 } 161 162 // zstdMatcher detects zstd compression algorithm. 163 // Zstandard compressed data is made of one or more frames. 164 // There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. 165 // See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. 166 func zstdMatcher() matcher { 167 return func(source []byte) bool { 168 if bytes.HasPrefix(source, zstdMagic) { 169 // Zstandard frame 170 return true 171 } 172 // skippable frame 173 if len(source) < 8 { 174 return false 175 } 176 // magic number from 0x184D2A50 to 0x184D2A5F. 177 if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { 178 return true 179 } 180 return false 181 } 182 } 183 184 // DetectCompression detects the compression algorithm of the source. 185 func DetectCompression(source []byte) Compression { 186 compressionMap := map[Compression]matcher{ 187 Bzip2: magicNumberMatcher(bzip2Magic), 188 Gzip: magicNumberMatcher(gzipMagic), 189 Xz: magicNumberMatcher(xzMagic), 190 Zstd: zstdMatcher(), 191 } 192 for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { 193 fn := compressionMap[compression] 194 if fn(source) { 195 return compression 196 } 197 } 198 return Uncompressed 199 } 200 201 func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { 202 args := []string{"xz", "-d", "-c", "-q"} 203 204 return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) 205 } 206 207 func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { 208 if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { 209 noPigz, err := strconv.ParseBool(noPigzEnv) 210 if err != nil { 211 logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") 212 } 213 if noPigz { 214 logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) 215 return gzip.NewReader(buf) 216 } 217 } 218 219 unpigzPath, err := exec.LookPath("unpigz") 220 if err != nil { 221 logrus.Debugf("unpigz binary not found, falling back to go gzip library") 222 return gzip.NewReader(buf) 223 } 224 225 logrus.Debugf("Using %s to decompress", unpigzPath) 226 227 return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) 228 } 229 230 func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { 231 return ioutils.NewReadCloserWrapper(readBuf, func() error { 232 cancel() 233 return readBuf.Close() 234 }) 235 } 236 237 // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 238 func DecompressStream(archive io.Reader) (io.ReadCloser, error) { 239 p := pools.BufioReader32KPool 240 buf := p.Get(archive) 241 bs, err := buf.Peek(10) 242 if err != nil && err != io.EOF { 243 // Note: we'll ignore any io.EOF error because there are some odd 244 // cases where the layer.tar file will be empty (zero bytes) and 245 // that results in an io.EOF from the Peek() call. So, in those 246 // cases we'll just treat it as a non-compressed stream and 247 // that means just create an empty layer. 248 // See Issue 18170 249 return nil, err 250 } 251 252 compression := DetectCompression(bs) 253 switch compression { 254 case Uncompressed: 255 readBufWrapper := p.NewReadCloserWrapper(buf, buf) 256 return readBufWrapper, nil 257 case Gzip: 258 ctx, cancel := context.WithCancel(context.Background()) 259 260 gzReader, err := gzDecompress(ctx, buf) 261 if err != nil { 262 cancel() 263 return nil, err 264 } 265 readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) 266 return wrapReadCloser(readBufWrapper, cancel), nil 267 case Bzip2: 268 bz2Reader := bzip2.NewReader(buf) 269 readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) 270 return readBufWrapper, nil 271 case Xz: 272 ctx, cancel := context.WithCancel(context.Background()) 273 274 xzReader, err := xzDecompress(ctx, buf) 275 if err != nil { 276 cancel() 277 return nil, err 278 } 279 readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) 280 return wrapReadCloser(readBufWrapper, cancel), nil 281 case Zstd: 282 zstdReader, err := zstd.NewReader(buf) 283 if err != nil { 284 return nil, err 285 } 286 readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) 287 return readBufWrapper, nil 288 default: 289 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 290 } 291 } 292 293 // CompressStream compresses the dest with specified compression algorithm. 294 func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { 295 p := pools.BufioWriter32KPool 296 buf := p.Get(dest) 297 switch compression { 298 case Uncompressed: 299 writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) 300 return writeBufWrapper, nil 301 case Gzip: 302 gzWriter := gzip.NewWriter(dest) 303 writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) 304 return writeBufWrapper, nil 305 case Bzip2, Xz: 306 // archive/bzip2 does not support writing, and there is no xz support at all 307 // However, this is not a problem as docker only currently generates gzipped tars 308 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 309 default: 310 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 311 } 312 } 313 314 // TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to 315 // modify the contents or header of an entry in the archive. If the file already 316 // exists in the archive the TarModifierFunc will be called with the Header and 317 // a reader which will return the files content. If the file does not exist both 318 // header and content will be nil. 319 type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) 320 321 // ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the 322 // tar stream are modified if they match any of the keys in mods. 323 func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { 324 pipeReader, pipeWriter := io.Pipe() 325 326 go func() { 327 tarReader := tar.NewReader(inputTarStream) 328 tarWriter := tar.NewWriter(pipeWriter) 329 defer inputTarStream.Close() 330 defer tarWriter.Close() 331 332 modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { 333 header, data, err := modifier(name, original, tarReader) 334 switch { 335 case err != nil: 336 return err 337 case header == nil: 338 return nil 339 } 340 341 if header.Name == "" { 342 header.Name = name 343 } 344 header.Size = int64(len(data)) 345 if err := tarWriter.WriteHeader(header); err != nil { 346 return err 347 } 348 if len(data) != 0 { 349 if _, err := tarWriter.Write(data); err != nil { 350 return err 351 } 352 } 353 return nil 354 } 355 356 var err error 357 var originalHeader *tar.Header 358 for { 359 originalHeader, err = tarReader.Next() 360 if err == io.EOF { 361 break 362 } 363 if err != nil { 364 pipeWriter.CloseWithError(err) 365 return 366 } 367 368 modifier, ok := mods[originalHeader.Name] 369 if !ok { 370 // No modifiers for this file, copy the header and data 371 if err := tarWriter.WriteHeader(originalHeader); err != nil { 372 pipeWriter.CloseWithError(err) 373 return 374 } 375 if _, err := pools.Copy(tarWriter, tarReader); err != nil { 376 pipeWriter.CloseWithError(err) 377 return 378 } 379 continue 380 } 381 delete(mods, originalHeader.Name) 382 383 if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { 384 pipeWriter.CloseWithError(err) 385 return 386 } 387 } 388 389 // Apply the modifiers that haven't matched any files in the archive 390 for name, modifier := range mods { 391 if err := modify(name, nil, modifier, nil); err != nil { 392 pipeWriter.CloseWithError(err) 393 return 394 } 395 } 396 397 pipeWriter.Close() 398 }() 399 return pipeReader 400 } 401 402 // Extension returns the extension of a file that uses the specified compression algorithm. 403 func (compression *Compression) Extension() string { 404 switch *compression { 405 case Uncompressed: 406 return "tar" 407 case Bzip2: 408 return "tar.bz2" 409 case Gzip: 410 return "tar.gz" 411 case Xz: 412 return "tar.xz" 413 case Zstd: 414 return "tar.zst" 415 } 416 return "" 417 } 418 419 // nosysFileInfo hides the system-dependent info of the wrapped FileInfo to 420 // prevent tar.FileInfoHeader from introspecting it and potentially calling into 421 // glibc. 422 type nosysFileInfo struct { 423 os.FileInfo 424 } 425 426 func (fi nosysFileInfo) Sys() interface{} { 427 // A Sys value of type *tar.Header is safe as it is system-independent. 428 // The tar.FileInfoHeader function copies the fields into the returned 429 // header without performing any OS lookups. 430 if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { 431 return sys 432 } 433 return nil 434 } 435 436 // sysStat, if non-nil, populates hdr from system-dependent fields of fi. 437 var sysStat func(fi os.FileInfo, hdr *tar.Header) error 438 439 // FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. 440 // 441 // Compared to the archive/tar.FileInfoHeader function, this function is safe to 442 // call from a chrooted process as it does not populate fields which would 443 // require operating system lookups. It behaves identically to 444 // tar.FileInfoHeader when fi is a FileInfo value returned from 445 // tar.Header.FileInfo(). 446 // 447 // When fi is a FileInfo for a native file, such as returned from os.Stat() and 448 // os.Lstat(), the returned Header value differs from one returned from 449 // tar.FileInfoHeader in the following ways. The Uname and Gname fields are not 450 // set as OS lookups would be required to populate them. The AccessTime and 451 // ChangeTime fields are not currently set (not yet implemented) although that 452 // is subject to change. Callers which require the AccessTime or ChangeTime 453 // fields to be zeroed should explicitly zero them out in the returned Header 454 // value to avoid any compatibility issues in the future. 455 func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { 456 hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) 457 if err != nil { 458 return nil, err 459 } 460 if sysStat != nil { 461 return hdr, sysStat(fi, hdr) 462 } 463 return hdr, nil 464 } 465 466 // FileInfoHeader creates a populated Header from fi. 467 // 468 // Compared to the archive/tar package, this function fills in less information 469 // but is safe to call from a chrooted process. The AccessTime and ChangeTime 470 // fields are not set in the returned header, ModTime is truncated to one-second 471 // precision, and the Uname and Gname fields are only set when fi is a FileInfo 472 // value returned from tar.Header.FileInfo(). Also, regardless of Go version, 473 // this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have 474 // been deleted since Go 1.9 archive/tar. 475 func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { 476 hdr, err := FileInfoHeaderNoLookups(fi, link) 477 if err != nil { 478 return nil, err 479 } 480 hdr.Format = tar.FormatPAX 481 hdr.ModTime = hdr.ModTime.Truncate(time.Second) 482 hdr.AccessTime = time.Time{} 483 hdr.ChangeTime = time.Time{} 484 hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) 485 hdr.Name = canonicalTarName(name, fi.IsDir()) 486 return hdr, nil 487 } 488 489 // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar 490 // https://github.com/golang/go/commit/66b5a2f 491 func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { 492 fm := fi.Mode() 493 switch { 494 case fm.IsRegular(): 495 mode |= modeISREG 496 case fi.IsDir(): 497 mode |= modeISDIR 498 case fm&os.ModeSymlink != 0: 499 mode |= modeISLNK 500 case fm&os.ModeDevice != 0: 501 if fm&os.ModeCharDevice != 0 { 502 mode |= modeISCHR 503 } else { 504 mode |= modeISBLK 505 } 506 case fm&os.ModeNamedPipe != 0: 507 mode |= modeISFIFO 508 case fm&os.ModeSocket != 0: 509 mode |= modeISSOCK 510 } 511 return mode 512 } 513 514 // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem 515 // to a tar header 516 func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { 517 const ( 518 // Values based on linux/include/uapi/linux/capability.h 519 xattrCapsSz2 = 20 520 versionOffset = 3 521 vfsCapRevision2 = 2 522 vfsCapRevision3 = 3 523 ) 524 capability, _ := system.Lgetxattr(path, "security.capability") 525 if capability != nil { 526 length := len(capability) 527 if capability[versionOffset] == vfsCapRevision3 { 528 // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no 529 // sense outside the user namespace the archive is built in. 530 capability[versionOffset] = vfsCapRevision2 531 length = xattrCapsSz2 532 } 533 hdr.Xattrs = make(map[string]string) 534 hdr.Xattrs["security.capability"] = string(capability[:length]) 535 } 536 return nil 537 } 538 539 type tarWhiteoutConverter interface { 540 ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) 541 ConvertRead(*tar.Header, string) (bool, error) 542 } 543 544 type tarAppender struct { 545 TarWriter *tar.Writer 546 Buffer *bufio.Writer 547 548 // for hardlink mapping 549 SeenFiles map[uint64]string 550 IdentityMapping idtools.IdentityMapping 551 ChownOpts *idtools.Identity 552 553 // For packing and unpacking whiteout files in the 554 // non standard format. The whiteout files defined 555 // by the AUFS standard are used as the tar whiteout 556 // standard. 557 WhiteoutConverter tarWhiteoutConverter 558 } 559 560 func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { 561 return &tarAppender{ 562 SeenFiles: make(map[uint64]string), 563 TarWriter: tar.NewWriter(writer), 564 Buffer: pools.BufioWriter32KPool.Get(nil), 565 IdentityMapping: idMapping, 566 ChownOpts: chownOpts, 567 } 568 } 569 570 // canonicalTarName provides a platform-independent and consistent posix-style 571 // path for files and directories to be archived regardless of the platform. 572 func canonicalTarName(name string, isDir bool) string { 573 name = CanonicalTarNameForPath(name) 574 575 // suffix with '/' for directories 576 if isDir && !strings.HasSuffix(name, "/") { 577 name += "/" 578 } 579 return name 580 } 581 582 // addTarFile adds to the tar archive a file from `path` as `name` 583 func (ta *tarAppender) addTarFile(path, name string) error { 584 fi, err := os.Lstat(path) 585 if err != nil { 586 return err 587 } 588 589 var link string 590 if fi.Mode()&os.ModeSymlink != 0 { 591 var err error 592 link, err = os.Readlink(path) 593 if err != nil { 594 return err 595 } 596 } 597 598 hdr, err := FileInfoHeader(name, fi, link) 599 if err != nil { 600 return err 601 } 602 if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { 603 return err 604 } 605 606 // if it's not a directory and has more than 1 link, 607 // it's hard linked, so set the type flag accordingly 608 if !fi.IsDir() && hasHardlinks(fi) { 609 inode, err := getInodeFromStat(fi.Sys()) 610 if err != nil { 611 return err 612 } 613 // a link should have a name that it links too 614 // and that linked name should be first in the tar archive 615 if oldpath, ok := ta.SeenFiles[inode]; ok { 616 hdr.Typeflag = tar.TypeLink 617 hdr.Linkname = oldpath 618 hdr.Size = 0 // This Must be here for the writer math to add up! 619 } else { 620 ta.SeenFiles[inode] = name 621 } 622 } 623 624 // check whether the file is overlayfs whiteout 625 // if yes, skip re-mapping container ID mappings. 626 isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 627 628 // handle re-mapping container ID mappings back to host ID mappings before 629 // writing tar headers/files. We skip whiteout files because they were written 630 // by the kernel and already have proper ownership relative to the host 631 if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { 632 fileIDPair, err := getFileUIDGID(fi.Sys()) 633 if err != nil { 634 return err 635 } 636 hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) 637 if err != nil { 638 return err 639 } 640 } 641 642 // explicitly override with ChownOpts 643 if ta.ChownOpts != nil { 644 hdr.Uid = ta.ChownOpts.UID 645 hdr.Gid = ta.ChownOpts.GID 646 } 647 648 if ta.WhiteoutConverter != nil { 649 wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) 650 if err != nil { 651 return err 652 } 653 654 // If a new whiteout file exists, write original hdr, then 655 // replace hdr with wo to be written after. Whiteouts should 656 // always be written after the original. Note the original 657 // hdr may have been updated to be a whiteout with returning 658 // a whiteout header 659 if wo != nil { 660 if err := ta.TarWriter.WriteHeader(hdr); err != nil { 661 return err 662 } 663 if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { 664 return fmt.Errorf("tar: cannot use whiteout for non-empty file") 665 } 666 hdr = wo 667 } 668 } 669 670 if err := ta.TarWriter.WriteHeader(hdr); err != nil { 671 return err 672 } 673 674 if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { 675 // We use sequential file access to avoid depleting the standby list on 676 // Windows. On Linux, this equates to a regular os.Open. 677 file, err := sequential.Open(path) 678 if err != nil { 679 return err 680 } 681 682 ta.Buffer.Reset(ta.TarWriter) 683 defer ta.Buffer.Reset(nil) 684 _, err = io.Copy(ta.Buffer, file) 685 file.Close() 686 if err != nil { 687 return err 688 } 689 err = ta.Buffer.Flush() 690 if err != nil { 691 return err 692 } 693 } 694 695 return nil 696 } 697 698 func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { 699 // hdr.Mode is in linux format, which we can use for sycalls, 700 // but for os.Foo() calls we need the mode converted to os.FileMode, 701 // so use hdrInfo.Mode() (they differ for e.g. setuid bits) 702 hdrInfo := hdr.FileInfo() 703 704 switch hdr.Typeflag { 705 case tar.TypeDir: 706 // Create directory unless it exists as a directory already. 707 // In that case we just want to merge the two 708 if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { 709 if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { 710 return err 711 } 712 } 713 714 case tar.TypeReg, tar.TypeRegA: 715 // Source is regular file. We use sequential file access to avoid depleting 716 // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. 717 file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) 718 if err != nil { 719 return err 720 } 721 if _, err := io.Copy(file, reader); err != nil { 722 file.Close() 723 return err 724 } 725 file.Close() 726 727 case tar.TypeBlock, tar.TypeChar: 728 if inUserns { // cannot create devices in a userns 729 return nil 730 } 731 // Handle this is an OS-specific way 732 if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { 733 return err 734 } 735 736 case tar.TypeFifo: 737 // Handle this is an OS-specific way 738 if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { 739 return err 740 } 741 742 case tar.TypeLink: 743 // #nosec G305 -- The target path is checked for path traversal. 744 targetPath := filepath.Join(extractDir, hdr.Linkname) 745 // check for hardlink breakout 746 if !strings.HasPrefix(targetPath, extractDir) { 747 return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) 748 } 749 if err := os.Link(targetPath, path); err != nil { 750 return err 751 } 752 753 case tar.TypeSymlink: 754 // path -> hdr.Linkname = targetPath 755 // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file 756 targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. 757 758 // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because 759 // that symlink would first have to be created, which would be caught earlier, at this very check: 760 if !strings.HasPrefix(targetPath, extractDir) { 761 return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) 762 } 763 if err := os.Symlink(hdr.Linkname, path); err != nil { 764 return err 765 } 766 767 case tar.TypeXGlobalHeader: 768 logrus.Debug("PAX Global Extended Headers found and ignored") 769 return nil 770 771 default: 772 return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) 773 } 774 775 // Lchown is not supported on Windows. 776 if Lchown && runtime.GOOS != "windows" { 777 if chownOpts == nil { 778 chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} 779 } 780 if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { 781 msg := "failed to Lchown %q for UID %d, GID %d" 782 if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { 783 msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" 784 } 785 return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) 786 } 787 } 788 789 var errors []string 790 for key, value := range hdr.Xattrs { 791 if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { 792 if err == syscall.ENOTSUP || err == syscall.EPERM { 793 // We ignore errors here because not all graphdrivers support 794 // xattrs *cough* old versions of AUFS *cough*. However only 795 // ENOTSUP should be emitted in that case, otherwise we still 796 // bail. 797 // EPERM occurs if modifying xattrs is not allowed. This can 798 // happen when running in userns with restrictions (ChromeOS). 799 errors = append(errors, err.Error()) 800 continue 801 } 802 return err 803 } 804 } 805 806 if len(errors) > 0 { 807 logrus.WithFields(logrus.Fields{ 808 "errors": errors, 809 }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") 810 } 811 812 // There is no LChmod, so ignore mode for symlink. Also, this 813 // must happen after chown, as that can modify the file mode 814 if err := handleLChmod(hdr, path, hdrInfo); err != nil { 815 return err 816 } 817 818 aTime := hdr.AccessTime 819 if aTime.Before(hdr.ModTime) { 820 // Last access time should never be before last modified time. 821 aTime = hdr.ModTime 822 } 823 824 // system.Chtimes doesn't support a NOFOLLOW flag atm 825 if hdr.Typeflag == tar.TypeLink { 826 if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { 827 if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { 828 return err 829 } 830 } 831 } else if hdr.Typeflag != tar.TypeSymlink { 832 if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { 833 return err 834 } 835 } else { 836 ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} 837 if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { 838 return err 839 } 840 } 841 return nil 842 } 843 844 // Tar creates an archive from the directory at `path`, and returns it as a 845 // stream of bytes. 846 func Tar(path string, compression Compression) (io.ReadCloser, error) { 847 return TarWithOptions(path, &TarOptions{Compression: compression}) 848 } 849 850 // TarWithOptions creates an archive from the directory at `path`, only including files whose relative 851 // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. 852 func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { 853 // Fix the source path to work with long path names. This is a no-op 854 // on platforms other than Windows. 855 srcPath = fixVolumePathPrefix(srcPath) 856 857 pm, err := patternmatcher.New(options.ExcludePatterns) 858 if err != nil { 859 return nil, err 860 } 861 862 pipeReader, pipeWriter := io.Pipe() 863 864 compressWriter, err := CompressStream(pipeWriter, options.Compression) 865 if err != nil { 866 return nil, err 867 } 868 869 whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) 870 if err != nil { 871 return nil, err 872 } 873 874 go func() { 875 ta := newTarAppender( 876 options.IDMap, 877 compressWriter, 878 options.ChownOpts, 879 ) 880 ta.WhiteoutConverter = whiteoutConverter 881 882 defer func() { 883 // Make sure to check the error on Close. 884 if err := ta.TarWriter.Close(); err != nil { 885 logrus.Errorf("Can't close tar writer: %s", err) 886 } 887 if err := compressWriter.Close(); err != nil { 888 logrus.Errorf("Can't close compress writer: %s", err) 889 } 890 if err := pipeWriter.Close(); err != nil { 891 logrus.Errorf("Can't close pipe writer: %s", err) 892 } 893 }() 894 895 // this buffer is needed for the duration of this piped stream 896 defer pools.BufioWriter32KPool.Put(ta.Buffer) 897 898 // In general we log errors here but ignore them because 899 // during e.g. a diff operation the container can continue 900 // mutating the filesystem and we can see transient errors 901 // from this 902 903 stat, err := os.Lstat(srcPath) 904 if err != nil { 905 return 906 } 907 908 if !stat.IsDir() { 909 // We can't later join a non-dir with any includes because the 910 // 'walk' will error if "file/." is stat-ed and "file" is not a 911 // directory. So, we must split the source path and use the 912 // basename as the include. 913 if len(options.IncludeFiles) > 0 { 914 logrus.Warn("Tar: Can't archive a file with includes") 915 } 916 917 dir, base := SplitPathDirEntry(srcPath) 918 srcPath = dir 919 options.IncludeFiles = []string{base} 920 } 921 922 if len(options.IncludeFiles) == 0 { 923 options.IncludeFiles = []string{"."} 924 } 925 926 seen := make(map[string]bool) 927 928 for _, include := range options.IncludeFiles { 929 rebaseName := options.RebaseNames[include] 930 931 var ( 932 parentMatchInfo []patternmatcher.MatchInfo 933 parentDirs []string 934 ) 935 936 walkRoot := getWalkRoot(srcPath, include) 937 filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { 938 if err != nil { 939 logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) 940 return nil 941 } 942 943 relFilePath, err := filepath.Rel(srcPath, filePath) 944 if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { 945 // Error getting relative path OR we are looking 946 // at the source directory path. Skip in both situations. 947 return nil 948 } 949 950 if options.IncludeSourceDir && include == "." && relFilePath != "." { 951 relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) 952 } 953 954 skip := false 955 956 // If "include" is an exact match for the current file 957 // then even if there's an "excludePatterns" pattern that 958 // matches it, don't skip it. IOW, assume an explicit 'include' 959 // is asking for that file no matter what - which is true 960 // for some files, like .dockerignore and Dockerfile (sometimes) 961 if include != relFilePath { 962 for len(parentDirs) != 0 { 963 lastParentDir := parentDirs[len(parentDirs)-1] 964 if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { 965 break 966 } 967 parentDirs = parentDirs[:len(parentDirs)-1] 968 parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] 969 } 970 971 var matchInfo patternmatcher.MatchInfo 972 if len(parentMatchInfo) != 0 { 973 skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) 974 } else { 975 skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) 976 } 977 if err != nil { 978 logrus.Errorf("Error matching %s: %v", relFilePath, err) 979 return err 980 } 981 982 if f.IsDir() { 983 parentDirs = append(parentDirs, relFilePath) 984 parentMatchInfo = append(parentMatchInfo, matchInfo) 985 } 986 } 987 988 if skip { 989 // If we want to skip this file and its a directory 990 // then we should first check to see if there's an 991 // excludes pattern (e.g. !dir/file) that starts with this 992 // dir. If so then we can't skip this dir. 993 994 // Its not a dir then so we can just return/skip. 995 if !f.IsDir() { 996 return nil 997 } 998 999 // No exceptions (!...) in patterns so just skip dir 1000 if !pm.Exclusions() { 1001 return filepath.SkipDir 1002 } 1003 1004 dirSlash := relFilePath + string(filepath.Separator) 1005 1006 for _, pat := range pm.Patterns() { 1007 if !pat.Exclusion() { 1008 continue 1009 } 1010 if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { 1011 // found a match - so can't skip this dir 1012 return nil 1013 } 1014 } 1015 1016 // No matching exclusion dir so just skip dir 1017 return filepath.SkipDir 1018 } 1019 1020 if seen[relFilePath] { 1021 return nil 1022 } 1023 seen[relFilePath] = true 1024 1025 // Rename the base resource. 1026 if rebaseName != "" { 1027 var replacement string 1028 if rebaseName != string(filepath.Separator) { 1029 // Special case the root directory to replace with an 1030 // empty string instead so that we don't end up with 1031 // double slashes in the paths. 1032 replacement = rebaseName 1033 } 1034 1035 relFilePath = strings.Replace(relFilePath, include, replacement, 1) 1036 } 1037 1038 if err := ta.addTarFile(filePath, relFilePath); err != nil { 1039 logrus.Errorf("Can't add file %s to tar: %s", filePath, err) 1040 // if pipe is broken, stop writing tar stream to it 1041 if err == io.ErrClosedPipe { 1042 return err 1043 } 1044 } 1045 return nil 1046 }) 1047 } 1048 }() 1049 1050 return pipeReader, nil 1051 } 1052 1053 // Unpack unpacks the decompressedArchive to dest with options. 1054 func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { 1055 tr := tar.NewReader(decompressedArchive) 1056 trBuf := pools.BufioReader32KPool.Get(nil) 1057 defer pools.BufioReader32KPool.Put(trBuf) 1058 1059 var dirs []*tar.Header 1060 whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) 1061 if err != nil { 1062 return err 1063 } 1064 1065 // Iterate through the files in the archive. 1066 loop: 1067 for { 1068 hdr, err := tr.Next() 1069 if err == io.EOF { 1070 // end of tar archive 1071 break 1072 } 1073 if err != nil { 1074 return err 1075 } 1076 1077 // ignore XGlobalHeader early to avoid creating parent directories for them 1078 if hdr.Typeflag == tar.TypeXGlobalHeader { 1079 logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) 1080 continue 1081 } 1082 1083 // Normalize name, for safety and for a simple is-root check 1084 // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: 1085 // This keeps "..\" as-is, but normalizes "\..\" to "\". 1086 hdr.Name = filepath.Clean(hdr.Name) 1087 1088 for _, exclude := range options.ExcludePatterns { 1089 if strings.HasPrefix(hdr.Name, exclude) { 1090 continue loop 1091 } 1092 } 1093 1094 // Ensure that the parent directory exists. 1095 err = createImpliedDirectories(dest, hdr, options) 1096 if err != nil { 1097 return err 1098 } 1099 1100 // #nosec G305 -- The joined path is checked for path traversal. 1101 path := filepath.Join(dest, hdr.Name) 1102 rel, err := filepath.Rel(dest, path) 1103 if err != nil { 1104 return err 1105 } 1106 if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { 1107 return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) 1108 } 1109 1110 // If path exits we almost always just want to remove and replace it 1111 // The only exception is when it is a directory *and* the file from 1112 // the layer is also a directory. Then we want to merge them (i.e. 1113 // just apply the metadata from the layer). 1114 if fi, err := os.Lstat(path); err == nil { 1115 if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { 1116 // If NoOverwriteDirNonDir is true then we cannot replace 1117 // an existing directory with a non-directory from the archive. 1118 return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) 1119 } 1120 1121 if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { 1122 // If NoOverwriteDirNonDir is true then we cannot replace 1123 // an existing non-directory with a directory from the archive. 1124 return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) 1125 } 1126 1127 if fi.IsDir() && hdr.Name == "." { 1128 continue 1129 } 1130 1131 if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { 1132 if err := os.RemoveAll(path); err != nil { 1133 return err 1134 } 1135 } 1136 } 1137 trBuf.Reset(tr) 1138 1139 if err := remapIDs(options.IDMap, hdr); err != nil { 1140 return err 1141 } 1142 1143 if whiteoutConverter != nil { 1144 writeFile, err := whiteoutConverter.ConvertRead(hdr, path) 1145 if err != nil { 1146 return err 1147 } 1148 if !writeFile { 1149 continue 1150 } 1151 } 1152 1153 if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { 1154 return err 1155 } 1156 1157 // Directory mtimes must be handled at the end to avoid further 1158 // file creation in them to modify the directory mtime 1159 if hdr.Typeflag == tar.TypeDir { 1160 dirs = append(dirs, hdr) 1161 } 1162 } 1163 1164 for _, hdr := range dirs { 1165 // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. 1166 path := filepath.Join(dest, hdr.Name) 1167 1168 if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { 1169 return err 1170 } 1171 } 1172 return nil 1173 } 1174 1175 // createImpliedDirectories will create all parent directories of the current path with default permissions, if they do 1176 // not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is 1177 // defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus 1178 // we most both create them and choose metadata like permissions. 1179 // 1180 // The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS 1181 // on which the daemon is running. This precondition is required because this function assumes a OS-specific path 1182 // separator when checking that a path is not the root. 1183 func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { 1184 // Not the root directory, ensure that the parent directory exists 1185 if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { 1186 parent := filepath.Dir(hdr.Name) 1187 parentPath := filepath.Join(dest, parent) 1188 if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { 1189 // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some 1190 // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche 1191 // usage that reduces the portability of an image. 1192 rootIDs := options.IDMap.RootPair() 1193 1194 err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) 1195 if err != nil { 1196 return err 1197 } 1198 } 1199 } 1200 1201 return nil 1202 } 1203 1204 // Untar reads a stream of bytes from `archive`, parses it as a tar archive, 1205 // and unpacks it into the directory at `dest`. 1206 // The archive may be compressed with one of the following algorithms: 1207 // identity (uncompressed), gzip, bzip2, xz. 1208 // 1209 // FIXME: specify behavior when target path exists vs. doesn't exist. 1210 func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { 1211 return untarHandler(tarArchive, dest, options, true) 1212 } 1213 1214 // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, 1215 // and unpacks it into the directory at `dest`. 1216 // The archive must be an uncompressed stream. 1217 func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { 1218 return untarHandler(tarArchive, dest, options, false) 1219 } 1220 1221 // Handler for teasing out the automatic decompression 1222 func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { 1223 if tarArchive == nil { 1224 return fmt.Errorf("Empty archive") 1225 } 1226 dest = filepath.Clean(dest) 1227 if options == nil { 1228 options = &TarOptions{} 1229 } 1230 if options.ExcludePatterns == nil { 1231 options.ExcludePatterns = []string{} 1232 } 1233 1234 r := tarArchive 1235 if decompress { 1236 decompressedArchive, err := DecompressStream(tarArchive) 1237 if err != nil { 1238 return err 1239 } 1240 defer decompressedArchive.Close() 1241 r = decompressedArchive 1242 } 1243 1244 return Unpack(r, dest, options) 1245 } 1246 1247 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. 1248 // If either Tar or Untar fails, TarUntar aborts and returns the error. 1249 func (archiver *Archiver) TarUntar(src, dst string) error { 1250 archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) 1251 if err != nil { 1252 return err 1253 } 1254 defer archive.Close() 1255 options := &TarOptions{ 1256 IDMap: archiver.IDMapping, 1257 } 1258 return archiver.Untar(archive, dst, options) 1259 } 1260 1261 // UntarPath untar a file from path to a destination, src is the source tar file path. 1262 func (archiver *Archiver) UntarPath(src, dst string) error { 1263 archive, err := os.Open(src) 1264 if err != nil { 1265 return err 1266 } 1267 defer archive.Close() 1268 options := &TarOptions{ 1269 IDMap: archiver.IDMapping, 1270 } 1271 return archiver.Untar(archive, dst, options) 1272 } 1273 1274 // CopyWithTar creates a tar archive of filesystem path `src`, and 1275 // unpacks it at filesystem path `dst`. 1276 // The archive is streamed directly with fixed buffering and no 1277 // intermediary disk IO. 1278 func (archiver *Archiver) CopyWithTar(src, dst string) error { 1279 srcSt, err := os.Stat(src) 1280 if err != nil { 1281 return err 1282 } 1283 if !srcSt.IsDir() { 1284 return archiver.CopyFileWithTar(src, dst) 1285 } 1286 1287 // if this Archiver is set up with ID mapping we need to create 1288 // the new destination directory with the remapped root UID/GID pair 1289 // as owner 1290 rootIDs := archiver.IDMapping.RootPair() 1291 // Create dst, copy src's content into it 1292 if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { 1293 return err 1294 } 1295 return archiver.TarUntar(src, dst) 1296 } 1297 1298 // CopyFileWithTar emulates the behavior of the 'cp' command-line 1299 // for a single file. It copies a regular file from path `src` to 1300 // path `dst`, and preserves all its metadata. 1301 func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { 1302 srcSt, err := os.Stat(src) 1303 if err != nil { 1304 return err 1305 } 1306 1307 if srcSt.IsDir() { 1308 return fmt.Errorf("Can't copy a directory") 1309 } 1310 1311 // Clean up the trailing slash. This must be done in an operating 1312 // system specific manner. 1313 if dst[len(dst)-1] == os.PathSeparator { 1314 dst = filepath.Join(dst, filepath.Base(src)) 1315 } 1316 // Create the holding directory if necessary 1317 if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { 1318 return err 1319 } 1320 1321 r, w := io.Pipe() 1322 errC := make(chan error, 1) 1323 1324 go func() { 1325 defer close(errC) 1326 1327 errC <- func() error { 1328 defer w.Close() 1329 1330 srcF, err := os.Open(src) 1331 if err != nil { 1332 return err 1333 } 1334 defer srcF.Close() 1335 1336 hdr, err := FileInfoHeaderNoLookups(srcSt, "") 1337 if err != nil { 1338 return err 1339 } 1340 hdr.Format = tar.FormatPAX 1341 hdr.ModTime = hdr.ModTime.Truncate(time.Second) 1342 hdr.AccessTime = time.Time{} 1343 hdr.ChangeTime = time.Time{} 1344 hdr.Name = filepath.Base(dst) 1345 hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) 1346 1347 if err := remapIDs(archiver.IDMapping, hdr); err != nil { 1348 return err 1349 } 1350 1351 tw := tar.NewWriter(w) 1352 defer tw.Close() 1353 if err := tw.WriteHeader(hdr); err != nil { 1354 return err 1355 } 1356 if _, err := io.Copy(tw, srcF); err != nil { 1357 return err 1358 } 1359 return nil 1360 }() 1361 }() 1362 defer func() { 1363 if er := <-errC; err == nil && er != nil { 1364 err = er 1365 } 1366 }() 1367 1368 err = archiver.Untar(r, filepath.Dir(dst), nil) 1369 if err != nil { 1370 r.CloseWithError(err) 1371 } 1372 return err 1373 } 1374 1375 // IdentityMapping returns the IdentityMapping of the archiver. 1376 func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { 1377 return archiver.IDMapping 1378 } 1379 1380 func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { 1381 ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) 1382 hdr.Uid, hdr.Gid = ids.UID, ids.GID 1383 return err 1384 } 1385 1386 // cmdStream executes a command, and returns its stdout as a stream. 1387 // If the command fails to run or doesn't complete successfully, an error 1388 // will be returned, including anything written on stderr. 1389 func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { 1390 cmd.Stdin = input 1391 pipeR, pipeW := io.Pipe() 1392 cmd.Stdout = pipeW 1393 var errBuf bytes.Buffer 1394 cmd.Stderr = &errBuf 1395 1396 // Run the command and return the pipe 1397 if err := cmd.Start(); err != nil { 1398 return nil, err 1399 } 1400 1401 // Ensure the command has exited before we clean anything up 1402 done := make(chan struct{}) 1403 1404 // Copy stdout to the returned pipe 1405 go func() { 1406 if err := cmd.Wait(); err != nil { 1407 pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) 1408 } else { 1409 pipeW.Close() 1410 } 1411 close(done) 1412 }() 1413 1414 return ioutils.NewReadCloserWrapper(pipeR, func() error { 1415 // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as 1416 // cmd.Wait waits for any non-file stdout/stderr/stdin to close. 1417 err := pipeR.Close() 1418 <-done 1419 return err 1420 }), nil 1421 } 1422 1423 // NewTempArchive reads the content of src into a temporary file, and returns the contents 1424 // of that file as an archive. The archive can only be read once - as soon as reading completes, 1425 // the file will be deleted. 1426 func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { 1427 f, err := os.CreateTemp(dir, "") 1428 if err != nil { 1429 return nil, err 1430 } 1431 if _, err := io.Copy(f, src); err != nil { 1432 return nil, err 1433 } 1434 if _, err := f.Seek(0, 0); err != nil { 1435 return nil, err 1436 } 1437 st, err := f.Stat() 1438 if err != nil { 1439 return nil, err 1440 } 1441 size := st.Size() 1442 return &TempArchive{File: f, Size: size}, nil 1443 } 1444 1445 // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, 1446 // the file will be deleted. 1447 type TempArchive struct { 1448 *os.File 1449 Size int64 // Pre-computed from Stat().Size() as a convenience 1450 read int64 1451 closed bool 1452 } 1453 1454 // Close closes the underlying file if it's still open, or does a no-op 1455 // to allow callers to try to close the TempArchive multiple times safely. 1456 func (archive *TempArchive) Close() error { 1457 if archive.closed { 1458 return nil 1459 } 1460 1461 archive.closed = true 1462 1463 return archive.File.Close() 1464 } 1465 1466 func (archive *TempArchive) Read(data []byte) (int, error) { 1467 n, err := archive.File.Read(data) 1468 archive.read += int64(n) 1469 if err != nil || archive.read == archive.Size { 1470 archive.Close() 1471 os.Remove(archive.File.Name()) 1472 } 1473 return n, err 1474 }