github.com/miqui/docker@v1.9.1/pkg/archive/archive.go

     1  package archive
     2  
     3  import (
     4  	"archive/tar"
     5  	"bufio"
     6  	"bytes"
     7  	"compress/bzip2"
     8  	"compress/gzip"
     9  	"errors"
    10  	"fmt"
    11  	"io"
    12  	"io/ioutil"
    13  	"os"
    14  	"os/exec"
    15  	"path/filepath"
    16  	"runtime"
    17  	"strings"
    18  	"syscall"
    19  
    20  	"github.com/Sirupsen/logrus"
    21  	"github.com/docker/docker/pkg/fileutils"
    22  	"github.com/docker/docker/pkg/idtools"
    23  	"github.com/docker/docker/pkg/ioutils"
    24  	"github.com/docker/docker/pkg/pools"
    25  	"github.com/docker/docker/pkg/promise"
    26  	"github.com/docker/docker/pkg/system"
    27  )
    28  
    29  type (
    30  	// Archive is an io.ReadCloser, i.e. it provides both the Read and Close methods.
    31  	Archive io.ReadCloser
    32  	// Reader is an io.Reader.
    33  	Reader io.Reader
    34  	// Compression represents whether, and how, an archive is compressed.
    35  	Compression int
    36  	// TarChownOptions wraps the chown options UID and GID.
    37  	TarChownOptions struct {
    38  		UID, GID int
    39  	}
    40  	// TarOptions wraps the tar options.
    41  	TarOptions struct {
    42  		IncludeFiles     []string
    43  		ExcludePatterns  []string
    44  		Compression      Compression
    45  		NoLchown         bool
    46  		UIDMaps          []idtools.IDMap
    47  		GIDMaps          []idtools.IDMap
    48  		ChownOpts        *TarChownOptions
    49  		IncludeSourceDir bool
    50  		// When unpacking, specifies whether overwriting a directory with a
    51  		// non-directory is allowed and vice versa.
    52  		NoOverwriteDirNonDir bool
    53  		// For each include when creating an archive, the included name will be
    54  		// replaced with the matching name from this map.
    55  		RebaseNames map[string]string
    56  	}
    57  
    58  	// Archiver allows the reuse of most utility functions of this package
    59  	// with a pluggable Untar function. Also, to facilitate the passing of
    60  	// specific id mappings for untar, an archiver can be created with maps
    61  	// which will then be passed to Untar operations
    62  	Archiver struct {
    63  		Untar   func(io.Reader, string, *TarOptions) error
    64  		UIDMaps []idtools.IDMap
    65  		GIDMaps []idtools.IDMap
    66  	}
    67  
    68  	// breakoutError is used to differentiate errors related to breaking out
    69  	// of the extraction destination. When testing archive breakout in the unit
    70  	// tests, this error is expected in order for the test to pass.
    71  	breakoutError error
    72  )
    73  
    74  var (
    75  	// ErrNotImplemented is the error message of function not implemented.
    76  	ErrNotImplemented = errors.New("Function not implemented")
    77  	defaultArchiver   = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
    78  )
    79  
    80  const (
    81  	// Uncompressed represents an uncompressed archive.
    82  	Uncompressed Compression = iota
    83  	// Bzip2 is the bzip2 compression algorithm.
    84  	Bzip2
    85  	// Gzip is the gzip compression algorithm.
    86  	Gzip
    87  	// Xz is the xz compression algorithm.
    88  	Xz
    89  )
    90  
    91  // IsArchive reports whether the given header bytes describe an archive, either a known compressed format or a plain tar.
    92  func IsArchive(header []byte) bool {
    93  	compression := DetectCompression(header)
    94  	if compression != Uncompressed {
    95  		return true
    96  	}
    97  	r := tar.NewReader(bytes.NewBuffer(header))
    98  	_, err := r.Next()
    99  	return err == nil
   100  }
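
// exampleIsArchiveFile is an editor-added sketch, not part of the upstream
// file. It shows how IsArchive might be applied to a file on disk: read the
// first 512 bytes (one tar block) so that both the compression magic numbers
// and an uncompressed tar header can be recognized. The function name and
// buffer size are illustrative assumptions, not upstream API.
func exampleIsArchiveFile(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	header := make([]byte, 512)
	n, err := f.Read(header)
	if err != nil && err != io.EOF {
		return false, err
	}
	return IsArchive(header[:n]), nil
}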
   101  
   102  // DetectCompression detects the compression algorithm of the source.
   103  func DetectCompression(source []byte) Compression {
   104  	for compression, m := range map[Compression][]byte{
   105  		Bzip2: {0x42, 0x5A, 0x68},
   106  		Gzip:  {0x1F, 0x8B, 0x08},
   107  		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
   108  	} {
   109  		if len(source) < len(m) {
   110  			logrus.Debugf("Len too short")
   111  			continue
   112  		}
   113  		if bytes.Equal(m, source[:len(m)]) {
   114  			return compression
   115  		}
   116  	}
   117  	return Uncompressed
   118  }
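
// exampleDetectCompression is an editor-added sketch, not part of the upstream
// file. It shows the typical way DetectCompression is fed: read a small prefix
// of the stream and branch on the result. The 10-byte prefix mirrors the peek
// size used by DecompressStream below.
func exampleDetectCompression(r io.Reader) (Compression, error) {
	prefix := make([]byte, 10)
	n, err := io.ReadFull(r, prefix)
	if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
		return Uncompressed, err
	}
	return DetectCompression(prefix[:n]), nil
}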
   119  
   120  func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
   121  	args := []string{"xz", "-d", "-c", "-q"}
   122  
   123  	return cmdStream(exec.Command(args[0], args[1:]...), archive)
   124  }
   125  
   126  // DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
   127  func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
   128  	p := pools.BufioReader32KPool
   129  	buf := p.Get(archive)
   130  	bs, err := buf.Peek(10)
   131  	if err != nil {
   132  		return nil, err
   133  	}
   134  
   135  	compression := DetectCompression(bs)
   136  	switch compression {
   137  	case Uncompressed:
   138  		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
   139  		return readBufWrapper, nil
   140  	case Gzip:
   141  		gzReader, err := gzip.NewReader(buf)
   142  		if err != nil {
   143  			return nil, err
   144  		}
   145  		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
   146  		return readBufWrapper, nil
   147  	case Bzip2:
   148  		bz2Reader := bzip2.NewReader(buf)
   149  		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
   150  		return readBufWrapper, nil
   151  	case Xz:
   152  		xzReader, chdone, err := xzDecompress(buf)
   153  		if err != nil {
   154  			return nil, err
   155  		}
   156  		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
   157  		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
   158  			<-chdone
   159  			return readBufWrapper.Close()
   160  		}), nil
   161  	default:
   162  		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
   163  	}
   164  }
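
// exampleListEntries is an editor-added sketch, not part of the upstream file.
// It shows how DecompressStream composes with archive/tar to enumerate the
// entries of a possibly compressed tar stream without extracting anything.
func exampleListEntries(archive io.Reader) ([]string, error) {
	decompressed, err := DecompressStream(archive)
	if err != nil {
		return nil, err
	}
	defer decompressed.Close()

	var names []string
	tr := tar.NewReader(decompressed)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return names, nil
		}
		if err != nil {
			return nil, err
		}
		names = append(names, hdr.Name)
	}
}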
   165  
   166  // CompressStream wraps dest in a WriteCloser that applies the specified compression algorithm to the data written to it.
   167  func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
   168  	p := pools.BufioWriter32KPool
   169  	buf := p.Get(dest)
   170  	switch compression {
   171  	case Uncompressed:
   172  		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
   173  		return writeBufWrapper, nil
   174  	case Gzip:
   175  		gzWriter := gzip.NewWriter(dest)
   176  		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
   177  		return writeBufWrapper, nil
   178  	case Bzip2, Xz:
   179  		// compress/bzip2 does not support writing, and there is no xz support at all.
   180  		// However, this is not a problem as docker currently only generates gzipped tars.
   181  		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
   182  	default:
   183  		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
   184  	}
   185  }
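
// exampleCompressToFile is an editor-added sketch, not part of the upstream
// file. It shows the intended use of CompressStream: the returned WriteCloser
// wraps dest, so the caller writes plain tar bytes and closes the wrapper when
// done. Only Uncompressed and Gzip succeed here, per the switch above; the
// function name and parameters are illustrative assumptions.
func exampleCompressToFile(path string, compression Compression, tarStream io.Reader) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	compressed, err := CompressStream(f, compression)
	if err != nil {
		return err
	}
	if _, err := io.Copy(compressed, tarStream); err != nil {
		compressed.Close()
		return err
	}
	return compressed.Close()
}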
   186  
   187  // Extension returns the extension of a file that uses the specified compression algorithm.
   188  func (compression *Compression) Extension() string {
   189  	switch *compression {
   190  	case Uncompressed:
   191  		return "tar"
   192  	case Bzip2:
   193  		return "tar.bz2"
   194  	case Gzip:
   195  		return "tar.gz"
   196  	case Xz:
   197  		return "tar.xz"
   198  	}
   199  	return ""
   200  }
   201  
   202  type tarAppender struct {
   203  	TarWriter *tar.Writer
   204  	Buffer    *bufio.Writer
   205  
   206  	// for hardlink mapping
   207  	SeenFiles map[uint64]string
   208  	UIDMaps   []idtools.IDMap
   209  	GIDMaps   []idtools.IDMap
   210  }
   211  
   212  // canonicalTarName provides a platform-independent and consistent POSIX-style
   213  // path for files and directories to be archived regardless of the platform.
   214  func canonicalTarName(name string, isDir bool) (string, error) {
   215  	name, err := CanonicalTarNameForPath(name)
   216  	if err != nil {
   217  		return "", err
   218  	}
   219  
   220  	// suffix with '/' for directories
   221  	if isDir && !strings.HasSuffix(name, "/") {
   222  		name += "/"
   223  	}
   224  	return name, nil
   225  }
   226  
   227  func (ta *tarAppender) addTarFile(path, name string) error {
   228  	fi, err := os.Lstat(path)
   229  	if err != nil {
   230  		return err
   231  	}
   232  
   233  	link := ""
   234  	if fi.Mode()&os.ModeSymlink != 0 {
   235  		if link, err = os.Readlink(path); err != nil {
   236  			return err
   237  		}
   238  	}
   239  
   240  	hdr, err := tar.FileInfoHeader(fi, link)
   241  	if err != nil {
   242  		return err
   243  	}
   244  	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
   245  
   246  	name, err = canonicalTarName(name, fi.IsDir())
   247  	if err != nil {
   248  		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
   249  	}
   250  	hdr.Name = name
   251  
   252  	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
   253  	if err != nil {
   254  		return err
   255  	}
   256  
   257  	// if it's a regular file and has more than 1 link,
   258  	// it's hardlinked, so set the type flag accordingly
   259  	if fi.Mode().IsRegular() && nlink > 1 {
   260  		// a link should have a name that it links to,
   261  		// and that linked name should be first in the tar archive
   262  		if oldpath, ok := ta.SeenFiles[inode]; ok {
   263  			hdr.Typeflag = tar.TypeLink
   264  			hdr.Linkname = oldpath
   265  			hdr.Size = 0 // This Must be here for the writer math to add up!
   266  		} else {
   267  			ta.SeenFiles[inode] = name
   268  		}
   269  	}
   270  
   271  	capability, _ := system.Lgetxattr(path, "security.capability")
   272  	if capability != nil {
   273  		hdr.Xattrs = make(map[string]string)
   274  		hdr.Xattrs["security.capability"] = string(capability)
   275  	}
   276  
   277  	// handle re-mapping container ID mappings back to host ID mappings before
   278  	// writing tar headers/files
   279  	if ta.UIDMaps != nil || ta.GIDMaps != nil {
   280  		uid, gid, err := getFileUIDGID(fi.Sys())
   281  		if err != nil {
   282  			return err
   283  		}
   284  		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
   285  		if err != nil {
   286  			return err
   287  		}
   288  		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
   289  		if err != nil {
   290  			return err
   291  		}
   292  		hdr.Uid = xUID
   293  		hdr.Gid = xGID
   294  	}
   295  
   296  	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
   297  		return err
   298  	}
   299  
   300  	if hdr.Typeflag == tar.TypeReg {
   301  		file, err := os.Open(path)
   302  		if err != nil {
   303  			return err
   304  		}
   305  
   306  		ta.Buffer.Reset(ta.TarWriter)
   307  		defer ta.Buffer.Reset(nil)
   308  		_, err = io.Copy(ta.Buffer, file)
   309  		file.Close()
   310  		if err != nil {
   311  			return err
   312  		}
   313  		err = ta.Buffer.Flush()
   314  		if err != nil {
   315  			return err
   316  		}
   317  	}
   318  
   319  	return nil
   320  }
   321  
   322  func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
   323  	// hdr.Mode is in Linux format, which we can use for syscalls,
   324  	// but for os.Foo() calls we need the mode converted to os.FileMode,
   325  	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
   326  	hdrInfo := hdr.FileInfo()
   327  
   328  	switch hdr.Typeflag {
   329  	case tar.TypeDir:
   330  		// Create directory unless it exists as a directory already.
   331  		// In that case we just want to merge the two
   332  		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
   333  			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
   334  				return err
   335  			}
   336  		}
   337  
   338  	case tar.TypeReg, tar.TypeRegA:
   339  		// Source is a regular file
   340  		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
   341  		if err != nil {
   342  			return err
   343  		}
   344  		if _, err := io.Copy(file, reader); err != nil {
   345  			file.Close()
   346  			return err
   347  		}
   348  		file.Close()
   349  
   350  	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
   351  		// Handle this in an OS-specific way
   352  		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
   353  			return err
   354  		}
   355  
   356  	case tar.TypeLink:
   357  		targetPath := filepath.Join(extractDir, hdr.Linkname)
   358  		// check for hardlink breakout
   359  		if !strings.HasPrefix(targetPath, extractDir) {
   360  			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
   361  		}
   362  		if err := os.Link(targetPath, path); err != nil {
   363  			return err
   364  		}
   365  
   366  	case tar.TypeSymlink:
   367  		// 	path 				-> hdr.Linkname = targetPath
   368  		// e.g. /extractDir/path/to/symlink 	-> ../2/file	= /extractDir/path/2/file
   369  		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
   370  
   371  		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
   372  		// that symlink would first have to be created, which would be caught earlier, at this very check:
   373  		if !strings.HasPrefix(targetPath, extractDir) {
   374  			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
   375  		}
   376  		if err := os.Symlink(hdr.Linkname, path); err != nil {
   377  			return err
   378  		}
   379  
   380  	case tar.TypeXGlobalHeader:
   381  		logrus.Debugf("PAX Global Extended Headers found and ignored")
   382  		return nil
   383  
   384  	default:
   385  		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
   386  	}
   387  
   388  	// Lchown is not supported on Windows.
   389  	if Lchown && runtime.GOOS != "windows" {
   390  		if chownOpts == nil {
   391  			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
   392  		}
   393  		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
   394  			return err
   395  		}
   396  	}
   397  
   398  	for key, value := range hdr.Xattrs {
   399  		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
   400  			return err
   401  		}
   402  	}
   403  
   404  	// There is no LChmod, so ignore mode for symlink. Also, this
   405  	// must happen after chown, as that can modify the file mode
   406  	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
   407  		return err
   408  	}
   409  
   410  	// system.Chtimes doesn't support a NOFOLLOW flag atm
   411  	if hdr.Typeflag == tar.TypeLink {
   412  		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
   413  			if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
   414  				return err
   415  			}
   416  		}
   417  	} else if hdr.Typeflag != tar.TypeSymlink {
   418  		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
   419  			return err
   420  		}
   421  	} else {
   422  		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
   423  		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
   424  			return err
   425  		}
   426  	}
   427  	return nil
   428  }
   429  
   430  // Tar creates an archive from the directory at `path`, and returns it as a
   431  // stream of bytes.
   432  func Tar(path string, compression Compression) (io.ReadCloser, error) {
   433  	return TarWithOptions(path, &TarOptions{Compression: compression})
   434  }
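
// exampleTarToFile is an editor-added sketch, not part of the upstream file.
// It shows Tar and Compression.Extension working together: archive a directory
// and save it under a name derived from the chosen compression. "dir" and
// "basename" are caller-supplied, illustrative parameters.
func exampleTarToFile(dir, basename string, compression Compression) error {
	archive, err := Tar(dir, compression)
	if err != nil {
		return err
	}
	defer archive.Close()

	out, err := os.Create(basename + "." + compression.Extension())
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, archive)
	return err
}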
   435  
   436  // TarWithOptions creates an archive from the directory at `path`, only including files whose relative
   437  // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
   438  func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
   439  
   440  	// Fix the source path to work with long path names. This is a no-op
   441  	// on platforms other than Windows.
   442  	srcPath = fixVolumePathPrefix(srcPath)
   443  
   444  	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
   445  
   446  	if err != nil {
   447  		return nil, err
   448  	}
   449  
   450  	pipeReader, pipeWriter := io.Pipe()
   451  
   452  	compressWriter, err := CompressStream(pipeWriter, options.Compression)
   453  	if err != nil {
   454  		return nil, err
   455  	}
   456  
   457  	go func() {
   458  		ta := &tarAppender{
   459  			TarWriter: tar.NewWriter(compressWriter),
   460  			Buffer:    pools.BufioWriter32KPool.Get(nil),
   461  			SeenFiles: make(map[uint64]string),
   462  			UIDMaps:   options.UIDMaps,
   463  			GIDMaps:   options.GIDMaps,
   464  		}
   465  
   466  		defer func() {
   467  			// Make sure to check the error on Close.
   468  			if err := ta.TarWriter.Close(); err != nil {
   469  				logrus.Debugf("Can't close tar writer: %s", err)
   470  			}
   471  			if err := compressWriter.Close(); err != nil {
   472  				logrus.Debugf("Can't close compress writer: %s", err)
   473  			}
   474  			if err := pipeWriter.Close(); err != nil {
   475  				logrus.Debugf("Can't close pipe writer: %s", err)
   476  			}
   477  		}()
   478  
   479  		// this buffer is needed for the duration of this piped stream
   480  		defer pools.BufioWriter32KPool.Put(ta.Buffer)
   481  
   482  		// In general we log errors here but ignore them because
   483  		// during e.g. a diff operation the container can continue
   484  		// mutating the filesystem and we can see transient errors
   485  		// from this
   486  
   487  		stat, err := os.Lstat(srcPath)
   488  		if err != nil {
   489  			return
   490  		}
   491  
   492  		if !stat.IsDir() {
   493  			// We can't later join a non-dir with any includes because the
   494  			// 'walk' will error if "file/." is stat-ed and "file" is not a
   495  			// directory. So, we must split the source path and use the
   496  			// basename as the include.
   497  			if len(options.IncludeFiles) > 0 {
   498  				logrus.Warn("Tar: Can't archive a file with includes")
   499  			}
   500  
   501  			dir, base := SplitPathDirEntry(srcPath)
   502  			srcPath = dir
   503  			options.IncludeFiles = []string{base}
   504  		}
   505  
   506  		if len(options.IncludeFiles) == 0 {
   507  			options.IncludeFiles = []string{"."}
   508  		}
   509  
   510  		seen := make(map[string]bool)
   511  
   512  		for _, include := range options.IncludeFiles {
   513  			rebaseName := options.RebaseNames[include]
   514  
   515  			walkRoot := getWalkRoot(srcPath, include)
   516  			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
   517  				if err != nil {
   518  					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
   519  					return nil
   520  				}
   521  
   522  				relFilePath, err := filepath.Rel(srcPath, filePath)
   523  				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
   524  					// Error getting relative path OR we are looking
   525  					// at the source directory path. Skip in both situations.
   526  					return nil
   527  				}
   528  
   529  				if options.IncludeSourceDir && include == "." && relFilePath != "." {
   530  					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
   531  				}
   532  
   533  				skip := false
   534  
   535  				// If "include" is an exact match for the current file
   536  				// then even if there's an "excludePatterns" pattern that
   537  				// matches it, don't skip it. IOW, assume an explicit 'include'
   538  				// is asking for that file no matter what - which is true
   539  				// for some files, like .dockerignore and Dockerfile (sometimes)
   540  				if include != relFilePath {
   541  					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
   542  					if err != nil {
   543  						logrus.Debugf("Error matching %s: %v", relFilePath, err)
   544  						return err
   545  					}
   546  				}
   547  
   548  				if skip {
   549  					if !exceptions && f.IsDir() {
   550  						return filepath.SkipDir
   551  					}
   552  					return nil
   553  				}
   554  
   555  				if seen[relFilePath] {
   556  					return nil
   557  				}
   558  				seen[relFilePath] = true
   559  
   560  				// Rename the base resource.
   561  				if rebaseName != "" {
   562  					var replacement string
   563  					if rebaseName != string(filepath.Separator) {
   564  						// Special case the root directory to replace with an
   565  						// empty string instead so that we don't end up with
   566  						// double slashes in the paths.
   567  						replacement = rebaseName
   568  					}
   569  
   570  					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
   571  				}
   572  
   573  				if err := ta.addTarFile(filePath, relFilePath); err != nil {
   574  					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
   575  				}
   576  				return nil
   577  			})
   578  		}
   579  	}()
   580  
   581  	return pipeReader, nil
   582  }
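
// exampleTarWithFilters is an editor-added sketch, not part of the upstream
// file. It illustrates the interaction of IncludeFiles and ExcludePatterns
// described above: only the named entries are walked, and anything matching an
// exclude pattern is skipped unless it was explicitly included. The include
// and exclude values are illustrative only.
func exampleTarWithFilters(srcPath string) (io.ReadCloser, error) {
	return TarWithOptions(srcPath, &TarOptions{
		Compression:     Gzip,
		IncludeFiles:    []string{"etc", "usr/bin"},     // illustrative paths
		ExcludePatterns: []string{"etc/*.bak", "*.tmp"}, // illustrative patterns
	})
}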
   583  
   584  // Unpack unpacks the decompressedArchive to dest with options.
   585  func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
   586  	tr := tar.NewReader(decompressedArchive)
   587  	trBuf := pools.BufioReader32KPool.Get(nil)
   588  	defer pools.BufioReader32KPool.Put(trBuf)
   589  
   590  	var dirs []*tar.Header
   591  	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
   592  	if err != nil {
   593  		return err
   594  	}
   595  
   596  	// Iterate through the files in the archive.
   597  loop:
   598  	for {
   599  		hdr, err := tr.Next()
   600  		if err == io.EOF {
   601  			// end of tar archive
   602  			break
   603  		}
   604  		if err != nil {
   605  			return err
   606  		}
   607  
   608  		// Normalize name, for safety and for a simple is-root check
   609  		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
   610  		// This keeps "..\" as-is, but normalizes "\..\" to "\".
   611  		hdr.Name = filepath.Clean(hdr.Name)
   612  
   613  		for _, exclude := range options.ExcludePatterns {
   614  			if strings.HasPrefix(hdr.Name, exclude) {
   615  				continue loop
   616  			}
   617  		}
   618  
   619  		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
   620  		// the filepath format for the OS on which the daemon is running. Hence
   621  		// the check for a slash-suffix MUST be done in an OS-agnostic way.
   622  		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
   623  			// Not the root directory, ensure that the parent directory exists
   624  			parent := filepath.Dir(hdr.Name)
   625  			parentPath := filepath.Join(dest, parent)
   626  			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
   627  				err = system.MkdirAll(parentPath, 0777)
   628  				if err != nil {
   629  					return err
   630  				}
   631  			}
   632  		}
   633  
   634  		path := filepath.Join(dest, hdr.Name)
   635  		rel, err := filepath.Rel(dest, path)
   636  		if err != nil {
   637  			return err
   638  		}
   639  		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
   640  			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
   641  		}
   642  
   643  		// If path exists we almost always just want to remove and replace it.
   644  		// The only exception is when it is a directory *and* the file from
   645  		// the layer is also a directory. Then we want to merge them (i.e.
   646  		// just apply the metadata from the layer).
   647  		if fi, err := os.Lstat(path); err == nil {
   648  			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
   649  				// If NoOverwriteDirNonDir is true then we cannot replace
   650  				// an existing directory with a non-directory from the archive.
   651  				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
   652  			}
   653  
   654  			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
   655  				// If NoOverwriteDirNonDir is true then we cannot replace
   656  				// an existing non-directory with a directory from the archive.
   657  				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
   658  			}
   659  
   660  			if fi.IsDir() && hdr.Name == "." {
   661  				continue
   662  			}
   663  
   664  			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
   665  				if err := os.RemoveAll(path); err != nil {
   666  					return err
   667  				}
   668  			}
   669  		}
   670  		trBuf.Reset(tr)
   671  
   672  		// if the options contain uid & gid maps, convert header uid/gid
   673  		// entries using the maps such that lchown sets the proper mapped
   674  		// uid/gid after writing the file. We only perform this mapping if
   675  		// the file isn't already owned by the remapped root UID or GID, as
   676  		// that specific uid/gid has no mapping from container -> host, and
   677  		// those files already have the proper ownership for inside the
   678  		// container.
   679  		if hdr.Uid != remappedRootUID {
   680  			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
   681  			if err != nil {
   682  				return err
   683  			}
   684  			hdr.Uid = xUID
   685  		}
   686  		if hdr.Gid != remappedRootGID {
   687  			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
   688  			if err != nil {
   689  				return err
   690  			}
   691  			hdr.Gid = xGID
   692  		}
   693  
   694  		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
   695  			return err
   696  		}
   697  
   698  		// Directory mtimes must be handled at the end, to prevent further
   699  		// file creation inside them from modifying the directory mtime
   700  		if hdr.Typeflag == tar.TypeDir {
   701  			dirs = append(dirs, hdr)
   702  		}
   703  	}
   704  
   705  	for _, hdr := range dirs {
   706  		path := filepath.Join(dest, hdr.Name)
   707  
   708  		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
   709  			return err
   710  		}
   711  	}
   712  	return nil
   713  }
   714  
   715  // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
   716  // and unpacks it into the directory at `dest`.
   717  // The archive may be compressed with one of the following algorithms:
   718  //  identity (uncompressed), gzip, bzip2, xz.
   719  // FIXME: specify behavior when target path exists vs. doesn't exist.
   720  func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
   721  	return untarHandler(tarArchive, dest, options, true)
   722  }
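
// exampleUntarFile is an editor-added sketch, not part of the upstream file.
// It shows the common Untar call: open a (possibly compressed) tarball and
// unpack it into dest, letting untarHandler supply default TarOptions.
func exampleUntarFile(tarball, dest string) error {
	f, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer f.Close()
	return Untar(f, dest, nil)
}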
   723  
   724  // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
   725  // and unpacks it into the directory at `dest`.
   726  // The archive must be an uncompressed stream.
   727  func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
   728  	return untarHandler(tarArchive, dest, options, false)
   729  }
   730  
   731  // Handler for teasing out the automatic decompression
   732  func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
   733  	if tarArchive == nil {
   734  		return fmt.Errorf("Empty archive")
   735  	}
   736  	dest = filepath.Clean(dest)
   737  	if options == nil {
   738  		options = &TarOptions{}
   739  	}
   740  	if options.ExcludePatterns == nil {
   741  		options.ExcludePatterns = []string{}
   742  	}
   743  
   744  	r := tarArchive
   745  	if decompress {
   746  		decompressedArchive, err := DecompressStream(tarArchive)
   747  		if err != nil {
   748  			return err
   749  		}
   750  		defer decompressedArchive.Close()
   751  		r = decompressedArchive
   752  	}
   753  
   754  	return Unpack(r, dest, options)
   755  }
   756  
   757  // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
   758  // If either Tar or Untar fails, TarUntar aborts and returns the error.
   759  func (archiver *Archiver) TarUntar(src, dst string) error {
   760  	logrus.Debugf("TarUntar(%s %s)", src, dst)
   761  	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
   762  	if err != nil {
   763  		return err
   764  	}
   765  	defer archive.Close()
   766  
   767  	var options *TarOptions
   768  	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
   769  		options = &TarOptions{
   770  			UIDMaps: archiver.UIDMaps,
   771  			GIDMaps: archiver.GIDMaps,
   772  		}
   773  	}
   774  	return archiver.Untar(archive, dst, options)
   775  }
   776  
   777  // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
   778  // If either Tar or Untar fails, TarUntar aborts and returns the error.
   779  func TarUntar(src, dst string) error {
   780  	return defaultArchiver.TarUntar(src, dst)
   781  }
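
// exampleLoggingArchiver is an editor-added sketch, not part of the upstream
// file. It shows the pluggable-Untar design of Archiver: callers can wrap or
// replace the unpack step while reusing TarUntar, CopyWithTar, and friends.
func exampleLoggingArchiver() *Archiver {
	return &Archiver{
		Untar: func(archive io.Reader, dest string, options *TarOptions) error {
			logrus.Debugf("unpacking into %s", dest)
			return Untar(archive, dest, options)
		},
	}
}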
   782  
   783  // UntarPath untars the tar archive at the source file path src into the destination directory dst.
   784  func (archiver *Archiver) UntarPath(src, dst string) error {
   785  	archive, err := os.Open(src)
   786  	if err != nil {
   787  		return err
   788  	}
   789  	defer archive.Close()
   790  	var options *TarOptions
   791  	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
   792  		options = &TarOptions{
   793  			UIDMaps: archiver.UIDMaps,
   794  			GIDMaps: archiver.GIDMaps,
   795  		}
   796  	}
   797  	if err := archiver.Untar(archive, dst, options); err != nil {
   798  		return err
   799  	}
   800  	return nil
   801  }
   802  
   803  // UntarPath is a convenience function which looks for an archive
   804  // at filesystem path `src`, and unpacks it at `dst`.
   805  func UntarPath(src, dst string) error {
   806  	return defaultArchiver.UntarPath(src, dst)
   807  }
   808  
   809  // CopyWithTar creates a tar archive of filesystem path `src`, and
   810  // unpacks it at filesystem path `dst`.
   811  // The archive is streamed directly with fixed buffering and no
   812  // intermediary disk IO.
   813  func (archiver *Archiver) CopyWithTar(src, dst string) error {
   814  	srcSt, err := os.Stat(src)
   815  	if err != nil {
   816  		return err
   817  	}
   818  	if !srcSt.IsDir() {
   819  		return archiver.CopyFileWithTar(src, dst)
   820  	}
   821  	// Create dst, copy src's content into it
   822  	logrus.Debugf("Creating dest directory: %s", dst)
   823  	if err := system.MkdirAll(dst, 0755); err != nil {
   824  		return err
   825  	}
   826  	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
   827  	return archiver.TarUntar(src, dst)
   828  }
   829  
   830  // CopyWithTar creates a tar archive of filesystem path `src`, and
   831  // unpacks it at filesystem path `dst`.
   832  // The archive is streamed directly with fixed buffering and no
   833  // intermediary disk IO.
   834  func CopyWithTar(src, dst string) error {
   835  	return defaultArchiver.CopyWithTar(src, dst)
   836  }
   837  
   838  // CopyFileWithTar emulates the behavior of the 'cp' command-line
   839  // for a single file. It copies a regular file from path `src` to
   840  // path `dst`, and preserves all its metadata.
   841  func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
   842  	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
   843  	srcSt, err := os.Stat(src)
   844  	if err != nil {
   845  		return err
   846  	}
   847  
   848  	if srcSt.IsDir() {
   849  		return fmt.Errorf("Can't copy a directory")
   850  	}
   851  
   852  	// Clean up the trailing slash. This must be done in an operating
   853  	// system specific manner.
   854  	if dst[len(dst)-1] == os.PathSeparator {
   855  		dst = filepath.Join(dst, filepath.Base(src))
   856  	}
   857  	// Create the holding directory if necessary
   858  	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
   859  		return err
   860  	}
   861  
   862  	r, w := io.Pipe()
   863  	errC := promise.Go(func() error {
   864  		defer w.Close()
   865  
   866  		srcF, err := os.Open(src)
   867  		if err != nil {
   868  			return err
   869  		}
   870  		defer srcF.Close()
   871  
   872  		hdr, err := tar.FileInfoHeader(srcSt, "")
   873  		if err != nil {
   874  			return err
   875  		}
   876  		hdr.Name = filepath.Base(dst)
   877  		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
   878  
   879  		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
   880  		if err != nil {
   881  			return err
   882  		}
   883  
   884  		// only perform mapping if the file being copied isn't already owned by the
   885  		// uid or gid of the remapped root in the container
   886  		if remappedRootUID != hdr.Uid {
   887  			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
   888  			if err != nil {
   889  				return err
   890  			}
   891  			hdr.Uid = xUID
   892  		}
   893  		if remappedRootGID != hdr.Gid {
   894  			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
   895  			if err != nil {
   896  				return err
   897  			}
   898  			hdr.Gid = xGID
   899  		}
   900  
   901  		tw := tar.NewWriter(w)
   902  		defer tw.Close()
   903  		if err := tw.WriteHeader(hdr); err != nil {
   904  			return err
   905  		}
   906  		if _, err := io.Copy(tw, srcF); err != nil {
   907  			return err
   908  		}
   909  		return nil
   910  	})
   911  	defer func() {
   912  		if er := <-errC; er != nil && err == nil {
   913  			err = er
   914  		}
   915  	}()
   916  
   917  	err = archiver.Untar(r, filepath.Dir(dst), nil)
   918  	if err != nil {
   919  		r.CloseWithError(err)
   920  	}
   921  	return err
   922  }
   923  
   924  // CopyFileWithTar emulates the behavior of the 'cp' command-line
   925  // for a single file. It copies a regular file from path `src` to
   926  // path `dst`, and preserves all its metadata.
   927  //
   928  // Destination handling is operating-system specific, depending on
   929  // where the daemon is running. If `dst` ends with a trailing slash,
   930  // the final destination path will be `dst/base(src)` (Linux) or
   931  // `dst\base(src)` (Windows).
   932  func CopyFileWithTar(src, dst string) (err error) {
   933  	return defaultArchiver.CopyFileWithTar(src, dst)
   934  }
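
// exampleCopyIntoDir is an editor-added sketch, not part of the upstream file.
// It demonstrates the trailing-separator rule documented above: appending the
// OS path separator to dst makes CopyFileWithTar place the file inside dst
// under its original base name, rather than writing to dst itself.
func exampleCopyIntoDir(srcFile, destDir string) error {
	return CopyFileWithTar(srcFile, destDir+string(os.PathSeparator))
}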
   935  
   936  // cmdStream executes a command, and returns its stdout as a stream.
   937  // If the command fails to run or doesn't complete successfully, an error
   938  // will be returned, including anything written on stderr.
   939  func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
   940  	chdone := make(chan struct{})
   941  	cmd.Stdin = input
   942  	pipeR, pipeW := io.Pipe()
   943  	cmd.Stdout = pipeW
   944  	var errBuf bytes.Buffer
   945  	cmd.Stderr = &errBuf
   946  
   947  	// Run the command and return the pipe
   948  	if err := cmd.Start(); err != nil {
   949  		return nil, nil, err
   950  	}
   951  
   952  	// Wait for the command to exit, then close the pipe, propagating any error (including stderr output) to readers
   953  	go func() {
   954  		if err := cmd.Wait(); err != nil {
   955  			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
   956  		} else {
   957  			pipeW.Close()
   958  		}
   959  		close(chdone)
   960  	}()
   961  
   962  	return pipeR, chdone, nil
   963  }
   964  
   965  // NewTempArchive reads the content of src into a temporary file, and returns the contents
   966  // of that file as an archive. The archive can only be read once - as soon as reading completes,
   967  // the file will be deleted.
   968  func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
   969  	f, err := ioutil.TempFile(dir, "")
   970  	if err != nil {
   971  		return nil, err
   972  	}
   973  	if _, err := io.Copy(f, src); err != nil {
   974  		return nil, err
   975  	}
   976  	if _, err := f.Seek(0, 0); err != nil {
   977  		return nil, err
   978  	}
   979  	st, err := f.Stat()
   980  	if err != nil {
   981  		return nil, err
   982  	}
   983  	size := st.Size()
   984  	return &TempArchive{File: f, Size: size}, nil
   985  }
   986  
   987  // TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
   988  // the file will be deleted.
   989  type TempArchive struct {
   990  	*os.File
   991  	Size   int64 // Pre-computed from Stat().Size() as a convenience
   992  	read   int64
   993  	closed bool
   994  }
   995  
   996  // Close closes the underlying file if it's still open, or does a no-op
   997  // to allow callers to try to close the TempArchive multiple times safely.
   998  func (archive *TempArchive) Close() error {
   999  	if archive.closed {
  1000  		return nil
  1001  	}
  1002  
  1003  	archive.closed = true
  1004  
  1005  	return archive.File.Close()
  1006  }
  1007  
  1008  func (archive *TempArchive) Read(data []byte) (int, error) {
  1009  	n, err := archive.File.Read(data)
  1010  	archive.read += int64(n)
  1011  	if err != nil || archive.read == archive.Size {
  1012  		archive.Close()
  1013  		os.Remove(archive.File.Name())
  1014  	}
  1015  	return n, err
  1016  }
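
// exampleSpoolArchive is an editor-added sketch, not part of the upstream file.
// It shows the intended use of NewTempArchive: spool a stream to disk so its
// size is known up front, then consume it exactly once; the backing file is
// removed automatically when the last byte has been read (see Read above).
func exampleSpoolArchive(src Archive, dir string) (int64, io.Reader, error) {
	tmp, err := NewTempArchive(src, dir)
	if err != nil {
		return 0, nil, err
	}
	return tmp.Size, tmp, nil
}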