// Package archive provides helper functions for creating and extracting
// tar archives, with optional compression and uid/gid remapping.
package archive

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/system"
)

type (
	// Archive is a type of io.ReadCloser which has two interfaces Read and Closer.
	Archive io.ReadCloser
	// Reader is a type of io.Reader.
	Reader io.Reader
	// Compression identifies whether a stream is compressed, and with which
	// algorithm (see the Uncompressed/Bzip2/Gzip/Xz constants below).
	Compression int
	// TarChownOptions wraps the chown options UID and GID.
	TarChownOptions struct {
		UID, GID int
	}
	// TarOptions wraps the tar options.
	TarOptions struct {
		// IncludeFiles, when non-nil, limits archiving to these paths
		// (relative to the source directory).
		IncludeFiles []string
		// ExcludePatterns lists patterns of paths to leave out of the archive.
		ExcludePatterns []string
		// Compression selects the compression algorithm for the stream.
		Compression Compression
		// NoLchown, when set, skips chown-ing extracted entries.
		NoLchown bool
		// UIDMaps/GIDMaps describe container<->host id remappings applied
		// while archiving and unpacking.
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
		// ChownOpts forces a fixed owner on all extracted entries.
		ChownOpts *TarChownOptions
		// IncludeSourceDir, when set, archives the source directory entry
		// itself rather than only its contents.
		IncludeSourceDir bool
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function. Also, to facilitate the passing of
	// specific id mappings for untar, an archiver can be created with maps
	// which will then be passed to Untar operations.
	Archiver struct {
		Untar   func(io.Reader, string, *TarOptions) error
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
	}

	// breakoutError is used to differentiate errors related to breaking out
	// When testing archive breakout in the unit tests, this error is expected
	// in order for the test to pass.
	breakoutError error
)

var (
	// ErrNotImplemented is the error message of function not implemented.
	ErrNotImplemented = errors.New("Function not implemented")
	// defaultArchiver backs the package-level convenience functions
	// (TarUntar, UntarPath, CopyWithTar, ...) with no id remapping.
	defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)

const (
	// Uncompressed represents the uncompressed.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
)

// IsArchive checks whether the given header bytes look like an archive:
// either a recognized compression magic number, or a parseable tar header.
func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

// DetectCompression detects the compression algorithm of the source.
102 func DetectCompression(source []byte) Compression { 103 for compression, m := range map[Compression][]byte{ 104 Bzip2: {0x42, 0x5A, 0x68}, 105 Gzip: {0x1F, 0x8B, 0x08}, 106 Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, 107 } { 108 if len(source) < len(m) { 109 logrus.Debugf("Len too short") 110 continue 111 } 112 if bytes.Compare(m, source[:len(m)]) == 0 { 113 return compression 114 } 115 } 116 return Uncompressed 117 } 118 119 func xzDecompress(archive io.Reader) (io.ReadCloser, error) { 120 args := []string{"xz", "-d", "-c", "-q"} 121 122 return CmdStream(exec.Command(args[0], args[1:]...), archive) 123 } 124 125 // DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive. 126 func DecompressStream(archive io.Reader) (io.ReadCloser, error) { 127 p := pools.BufioReader32KPool 128 buf := p.Get(archive) 129 bs, err := buf.Peek(10) 130 if err != nil { 131 return nil, err 132 } 133 134 compression := DetectCompression(bs) 135 switch compression { 136 case Uncompressed: 137 readBufWrapper := p.NewReadCloserWrapper(buf, buf) 138 return readBufWrapper, nil 139 case Gzip: 140 gzReader, err := gzip.NewReader(buf) 141 if err != nil { 142 return nil, err 143 } 144 readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) 145 return readBufWrapper, nil 146 case Bzip2: 147 bz2Reader := bzip2.NewReader(buf) 148 readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) 149 return readBufWrapper, nil 150 case Xz: 151 xzReader, err := xzDecompress(buf) 152 if err != nil { 153 return nil, err 154 } 155 readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) 156 return readBufWrapper, nil 157 default: 158 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 159 } 160 } 161 162 // CompressStream compresses the dest with specified compression algorithm. 
163 func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { 164 p := pools.BufioWriter32KPool 165 buf := p.Get(dest) 166 switch compression { 167 case Uncompressed: 168 writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) 169 return writeBufWrapper, nil 170 case Gzip: 171 gzWriter := gzip.NewWriter(dest) 172 writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) 173 return writeBufWrapper, nil 174 case Bzip2, Xz: 175 // archive/bzip2 does not support writing, and there is no xz support at all 176 // However, this is not a problem as docker only currently generates gzipped tars 177 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 178 default: 179 return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) 180 } 181 } 182 183 // Extension returns the extension of a file that uses the specified compression algorithm. 184 func (compression *Compression) Extension() string { 185 switch *compression { 186 case Uncompressed: 187 return "tar" 188 case Bzip2: 189 return "tar.bz2" 190 case Gzip: 191 return "tar.gz" 192 case Xz: 193 return "tar.xz" 194 } 195 return "" 196 } 197 198 type tarAppender struct { 199 TarWriter *tar.Writer 200 Buffer *bufio.Writer 201 202 // for hardlink mapping 203 SeenFiles map[uint64]string 204 UIDMaps []idtools.IDMap 205 GIDMaps []idtools.IDMap 206 } 207 208 // canonicalTarName provides a platform-independent and consistent posix-style 209 //path for files and directories to be archived regardless of the platform. 
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	name, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name, nil
}

// addTarFile writes the filesystem entry at path into the archive under the
// archived name `name`, handling symlinks, hardlinks, xattr capabilities,
// id remapping and regular-file content copying.
func (ta *tarAppender) addTarFile(path, name string) error {
	// Lstat (not Stat) so that symlinks are archived as links, not targets.
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's a regular file and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if fi.Mode().IsRegular() && nlink > 1 {
		// a link should have a name that it links too
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	// Best-effort read of the security.capability xattr; errors are
	// deliberately ignored (the attribute simply may not exist).
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	// handle re-mapping container ID mappings back to host ID mappings before
	// writing tar headers/files
	if ta.UIDMaps != nil || ta.GIDMaps != nil {
		uid, gid, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
		if err != nil {
			return err
		}
		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
		if err != nil {
			return err
		}
		hdr.Uid = xUID
		hdr.Gid = xGID
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		// Reuse the appender's buffer for the copy; reset to nil afterwards
		// so the pooled buffer does not pin the tar writer.
		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

// createTarFile materializes a single tar entry `hdr` at `path` inside
// `extractDir`, then applies ownership, xattrs, mode, and timestamps.
// Hardlink and symlink targets are validated against extractDir to prevent
// path-breakout attacks.
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)

	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	// Archive creation runs in a goroutine; the caller consumes pipeReader.
	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   options.UIDMaps,
			GIDMaps:   options.GIDMaps,
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Debugf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Debugf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Debugf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		// seen guards against emitting the same relative path twice when
		// include patterns overlap.
		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// No exceptions (!...) in patterns so just skip the
					// whole directory; with exceptions we must descend.
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}

// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		// ExcludePatterns here are plain prefixes, not glob patterns.
		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		// Reject entries that resolve outside of dest (tar breakout).
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		// if the options contain a uid & gid maps, convert header uid/gid
		// entries using the maps such that lchown sets the proper mapped
		// uid/gid after writing the file. We only perform this mapping if
		// the file isn't already owned by the remapped root UID or GID, as
		// that specific uid/gid has no mapping from container -> host, and
		// those files already have the proper ownership for inside the
		// container.
		if hdr.Uid != remappedRootUID {
			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}
		if hdr.Gid != remappedRootGID {
			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)

		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
716 func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { 717 return untarHandler(tarArchive, dest, options, true) 718 } 719 720 // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, 721 // and unpacks it into the directory at `dest`. 722 // The archive must be an uncompressed stream. 723 func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { 724 return untarHandler(tarArchive, dest, options, false) 725 } 726 727 // Handler for teasing out the automatic decompression 728 func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { 729 if tarArchive == nil { 730 return fmt.Errorf("Empty archive") 731 } 732 dest = filepath.Clean(dest) 733 if options == nil { 734 options = &TarOptions{} 735 } 736 if options.ExcludePatterns == nil { 737 options.ExcludePatterns = []string{} 738 } 739 740 r := tarArchive 741 if decompress { 742 decompressedArchive, err := DecompressStream(tarArchive) 743 if err != nil { 744 return err 745 } 746 defer decompressedArchive.Close() 747 r = decompressedArchive 748 } 749 750 return Unpack(r, dest, options) 751 } 752 753 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. 754 // If either Tar or Untar fails, TarUntar aborts and returns the error. 755 func (archiver *Archiver) TarUntar(src, dst string) error { 756 logrus.Debugf("TarUntar(%s %s)", src, dst) 757 archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) 758 if err != nil { 759 return err 760 } 761 defer archive.Close() 762 763 var options *TarOptions 764 if archiver.UIDMaps != nil || archiver.GIDMaps != nil { 765 options = &TarOptions{ 766 UIDMaps: archiver.UIDMaps, 767 GIDMaps: archiver.GIDMaps, 768 } 769 } 770 return archiver.Untar(archive, dst, options) 771 } 772 773 // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. 
774 // If either Tar or Untar fails, TarUntar aborts and returns the error. 775 func TarUntar(src, dst string) error { 776 return defaultArchiver.TarUntar(src, dst) 777 } 778 779 // UntarPath untar a file from path to a destination, src is the source tar file path. 780 func (archiver *Archiver) UntarPath(src, dst string) error { 781 archive, err := os.Open(src) 782 if err != nil { 783 return err 784 } 785 defer archive.Close() 786 var options *TarOptions 787 if archiver.UIDMaps != nil || archiver.GIDMaps != nil { 788 options = &TarOptions{ 789 UIDMaps: archiver.UIDMaps, 790 GIDMaps: archiver.GIDMaps, 791 } 792 } 793 if err := archiver.Untar(archive, dst, options); err != nil { 794 return err 795 } 796 return nil 797 } 798 799 // UntarPath is a convenience function which looks for an archive 800 // at filesystem path `src`, and unpacks it at `dst`. 801 func UntarPath(src, dst string) error { 802 return defaultArchiver.UntarPath(src, dst) 803 } 804 805 // CopyWithTar creates a tar archive of filesystem path `src`, and 806 // unpacks it at filesystem path `dst`. 807 // The archive is streamed directly with fixed buffering and no 808 // intermediary disk IO. 809 func (archiver *Archiver) CopyWithTar(src, dst string) error { 810 srcSt, err := os.Stat(src) 811 if err != nil { 812 return err 813 } 814 if !srcSt.IsDir() { 815 return archiver.CopyFileWithTar(src, dst) 816 } 817 // Create dst, copy src's content into it 818 logrus.Debugf("Creating dest directory: %s", dst) 819 if err := system.MkdirAll(dst, 0755); err != nil { 820 return err 821 } 822 logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) 823 return archiver.TarUntar(src, dst) 824 } 825 826 // CopyWithTar creates a tar archive of filesystem path `src`, and 827 // unpacks it at filesystem path `dst`. 828 // The archive is streamed directly with fixed buffering and no 829 // intermediary disk IO. 
830 func CopyWithTar(src, dst string) error { 831 return defaultArchiver.CopyWithTar(src, dst) 832 } 833 834 // CopyFileWithTar emulates the behavior of the 'cp' command-line 835 // for a single file. It copies a regular file from path `src` to 836 // path `dst`, and preserves all its metadata. 837 func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { 838 logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) 839 srcSt, err := os.Stat(src) 840 if err != nil { 841 return err 842 } 843 844 if srcSt.IsDir() { 845 return fmt.Errorf("Can't copy a directory") 846 } 847 848 // Clean up the trailing slash. This must be done in an operating 849 // system specific manner. 850 if dst[len(dst)-1] == os.PathSeparator { 851 dst = filepath.Join(dst, filepath.Base(src)) 852 } 853 // Create the holding directory if necessary 854 if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { 855 return err 856 } 857 858 r, w := io.Pipe() 859 errC := promise.Go(func() error { 860 defer w.Close() 861 862 srcF, err := os.Open(src) 863 if err != nil { 864 return err 865 } 866 defer srcF.Close() 867 868 hdr, err := tar.FileInfoHeader(srcSt, "") 869 if err != nil { 870 return err 871 } 872 hdr.Name = filepath.Base(dst) 873 hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) 874 875 remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) 876 if err != nil { 877 return err 878 } 879 880 // only perform mapping if the file being copied isn't already owned by the 881 // uid or gid of the remapped root in the container 882 if remappedRootUID != hdr.Uid { 883 xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) 884 if err != nil { 885 return err 886 } 887 hdr.Uid = xUID 888 } 889 if remappedRootGID != hdr.Gid { 890 xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) 891 if err != nil { 892 return err 893 } 894 hdr.Gid = xGID 895 } 896 897 tw := tar.NewWriter(w) 898 defer tw.Close() 899 if err := tw.WriteHeader(hdr); err != nil 
{ 900 return err 901 } 902 if _, err := io.Copy(tw, srcF); err != nil { 903 return err 904 } 905 return nil 906 }) 907 defer func() { 908 if er := <-errC; err != nil { 909 err = er 910 } 911 }() 912 913 return archiver.Untar(r, filepath.Dir(dst), nil) 914 } 915 916 // CopyFileWithTar emulates the behavior of the 'cp' command-line 917 // for a single file. It copies a regular file from path `src` to 918 // path `dst`, and preserves all its metadata. 919 // 920 // Destination handling is in an operating specific manner depending 921 // where the daemon is running. If `dst` ends with a trailing slash 922 // the final destination path will be `dst/base(src)` (Linux) or 923 // `dst\base(src)` (Windows). 924 func CopyFileWithTar(src, dst string) (err error) { 925 return defaultArchiver.CopyFileWithTar(src, dst) 926 } 927 928 // CmdStream executes a command, and returns its stdout as a stream. 929 // If the command fails to run or doesn't complete successfully, an error 930 // will be returned, including anything written on stderr. 
931 func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { 932 if input != nil { 933 stdin, err := cmd.StdinPipe() 934 if err != nil { 935 return nil, err 936 } 937 // Write stdin if any 938 go func() { 939 io.Copy(stdin, input) 940 stdin.Close() 941 }() 942 } 943 stdout, err := cmd.StdoutPipe() 944 if err != nil { 945 return nil, err 946 } 947 stderr, err := cmd.StderrPipe() 948 if err != nil { 949 return nil, err 950 } 951 pipeR, pipeW := io.Pipe() 952 errChan := make(chan []byte) 953 // Collect stderr, we will use it in case of an error 954 go func() { 955 errText, e := ioutil.ReadAll(stderr) 956 if e != nil { 957 errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") 958 } 959 errChan <- errText 960 }() 961 // Copy stdout to the returned pipe 962 go func() { 963 _, err := io.Copy(pipeW, stdout) 964 if err != nil { 965 pipeW.CloseWithError(err) 966 } 967 errText := <-errChan 968 if err := cmd.Wait(); err != nil { 969 pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) 970 } else { 971 pipeW.Close() 972 } 973 }() 974 // Run the command and return the pipe 975 if err := cmd.Start(); err != nil { 976 return nil, err 977 } 978 return pipeR, nil 979 } 980 981 // NewTempArchive reads the content of src into a temporary file, and returns the contents 982 // of that file as an archive. The archive can only be read once - as soon as reading completes, 983 // the file will be deleted. 984 func NewTempArchive(src Archive, dir string) (*TempArchive, error) { 985 f, err := ioutil.TempFile(dir, "") 986 if err != nil { 987 return nil, err 988 } 989 if _, err := io.Copy(f, src); err != nil { 990 return nil, err 991 } 992 if _, err := f.Seek(0, 0); err != nil { 993 return nil, err 994 } 995 st, err := f.Stat() 996 if err != nil { 997 return nil, err 998 } 999 size := st.Size() 1000 return &TempArchive{File: f, Size: size}, nil 1001 } 1002 1003 // TempArchive is a temporary archive. 
The archive can only be read once - as soon as reading completes, 1004 // the file will be deleted. 1005 type TempArchive struct { 1006 *os.File 1007 Size int64 // Pre-computed from Stat().Size() as a convenience 1008 read int64 1009 closed bool 1010 } 1011 1012 // Close closes the underlying file if it's still open, or does a no-op 1013 // to allow callers to try to close the TempArchive multiple times safely. 1014 func (archive *TempArchive) Close() error { 1015 if archive.closed { 1016 return nil 1017 } 1018 1019 archive.closed = true 1020 1021 return archive.File.Close() 1022 } 1023 1024 func (archive *TempArchive) Read(data []byte) (int, error) { 1025 n, err := archive.File.Read(data) 1026 archive.read += int64(n) 1027 if err != nil || archive.read == archive.Size { 1028 archive.Close() 1029 os.Remove(archive.File.Name()) 1030 } 1031 return n, err 1032 }