github.com/guilhermebr/docker@v1.4.2-0.20150428121140-67da055cebca/pkg/archive/archive.go

package archive

import (
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/system"
)

type (
	// Archive is a stream of bytes in tar format, optionally compressed.
	Archive io.ReadCloser
	// ArchiveReader is a readable archive stream.
	ArchiveReader io.Reader
	// Compression identifies the compression algorithm applied to an archive.
	Compression int
	// TarOptions controls how an archive is created or unpacked.
	TarOptions struct {
		IncludeFiles    []string
		ExcludePatterns []string
		Compression     Compression
		NoLchown        bool
		Name            string
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function.
	Archiver struct {
		Untar func(io.Reader, string, *TarOptions) error
	}

	// breakoutError is used to differentiate errors related to breaking out.
	// When testing archive breakout in the unit tests, this error is expected
	// in order for the test to pass.
	breakoutError error
)

var (
	// ErrNotImplemented indicates that a function is not implemented on the current platform.
	ErrNotImplemented = errors.New("Function not implemented")
	defaultArchiver   = &Archiver{Untar}
)

// Supported compression formats.
const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

// IsArchive reports whether the given header bytes look like the start of an
// archive: either a recognized compression format or a valid tar header.
func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

// DetectCompression inspects the magic bytes at the start of `source` and
// returns the matching Compression, or Uncompressed if none matches.
func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			logrus.Debugf("Len too short")
			continue
		}
		if bytes.Equal(m, source[:len(m)]) {
			return compression
		}
	}
	return Uncompressed
}

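// A minimal usage sketch (editor's illustration, not part of the original
// source): peek at the first bytes of a stream and branch on the detected
// format. The buffer contents below are hypothetical.
//
//	header := []byte{0x1F, 0x8B, 0x08, 0x00} // looks like gzip
//	if DetectCompression(header) == Gzip {
//		// hand the full stream to DecompressStream, which re-detects and wraps it
//	}
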
func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}

	return CmdStream(exec.Command(args[0], args[1:]...), archive)
}

// DecompressStream detects the compression of `archive` and returns a reader
// that yields the uncompressed stream. Closing the returned reader releases
// the pooled buffer used for detection.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil {
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		xzReader, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return readBufWrapper, nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

// CompressStream wraps `dest` in a writer that compresses with the requested
// algorithm. Only Uncompressed and Gzip are supported for writing.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// compress/bzip2 does not support writing, and there is no xz support at all.
		// However, this is not a problem as docker only currently generates gzipped tars.
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

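// A minimal round-trip sketch (editor's illustration, not part of the original
// source): compress a payload with CompressStream and read it back through
// DecompressStream. Paths are hypothetical and error handling is elided.
//
//	f, _ := os.Create("/tmp/payload.tar.gz")
//	w, _ := CompressStream(f, Gzip)
//	w.Write(payload)
//	w.Close()
//	f.Close()
//
//	f2, _ := os.Open("/tmp/payload.tar.gz")
//	r, _ := DecompressStream(f2) // detects gzip from the magic bytes
//	data, _ := ioutil.ReadAll(r)
//	r.Close()
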
// Extension returns the conventional file extension for an archive using this
// compression, e.g. "tar.gz" for Gzip.
func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
}

// canonicalTarName provides a platform-independent and consistent POSIX-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	name, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name, nil
}

func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's a regular file and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if fi.Mode().IsRegular() && nlink > 1 {
		// a link should have a name that it links to,
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		mode := uint32(hdr.Mode & 07777)
		switch hdr.Typeflag {
		case tar.TypeBlock:
			mode |= syscall.S_IFBLK
		case tar.TypeChar:
			mode |= syscall.S_IFCHR
		case tar.TypeFifo:
			mode |= syscall.S_IFIFO
		}

		if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// 	path 				-> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink 	-> ../2/file	= /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d", hdr.Typeflag)
	}

	if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
		return err
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}

	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so symlinks get
	// their timestamps set with LUtimesNano below instead.
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	} else {
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.IncludeFiles == nil {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		var renamedRelFilePath string // For when TarOptions.Name is set
		for _, include := range options.IncludeFiles {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", filePath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the root path. Skip in both situations.
					return nil
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					if f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource
				if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
					renamedRelFilePath = relFilePath
				}
				// Set this to make sure the items underneath also get renamed
				if options.Name != "" {
					relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close tar writer: %s", err)
		}
		if err := compressWriter.Close(); err != nil {
			logrus.Debugf("Can't close compress writer: %s", err)
		}
		if err := pipeWriter.Close(); err != nil {
			logrus.Debugf("Can't close pipe writer: %s", err)
		}
	}()

	return pipeReader, nil
}

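// A minimal usage sketch (editor's illustration, not part of the original
// source): archive a directory while honoring exclude patterns. The directory
// and patterns are hypothetical.
//
//	rc, err := TarWithOptions("/path/to/context", &TarOptions{
//		Compression:     Gzip,
//		ExcludePatterns: []string{"*.log", "tmp"},
//	})
//	if err != nil {
//		// handle error
//	}
//	defer rc.Close()
//	// stream rc to a file, over the network, or straight into Untar
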
// Unpack reads an already-decompressed tar stream and extracts it into the
// directory at `dest`, honoring `options.ExcludePatterns`.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/"
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, "../") {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it.
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if fi.IsDir() && hdr.Name == "." {
				continue
			}
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)
		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them from modifying the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
//  identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if archive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()
	return Unpack(decompressedArchive, dest, options)
}

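// A minimal usage sketch (editor's illustration, not part of the original
// source): unpack a (possibly compressed) tarball from disk. The paths are
// hypothetical; UntarPath below wraps the same steps.
//
//	f, err := os.Open("/tmp/layer.tar.gz")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	if err := Untar(f, "/var/lib/app/rootfs", nil); err != nil {
//		// handle error
//	}
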
// TarUntar archives src with no compression and unpacks the result at dst
// using the archiver's Untar function.
func (archiver *Archiver) TarUntar(src, dst string) error {
	logrus.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()
	return archiver.Untar(archive, dst, nil)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
	return defaultArchiver.TarUntar(src, dst)
}

// UntarPath opens the archive at filesystem path src and unpacks it at dst
// using the archiver's Untar function.
func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	if err := archiver.Untar(archive, dst, nil); err != nil {
		return err
	}
	return nil
}

// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
	return defaultArchiver.UntarPath(src, dst)
}

// CopyWithTar creates a tar archive of src and unpacks it at dst. If src is a
// regular file it defers to CopyFileWithTar.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}
	// Create dst, copy src's content into it
	logrus.Debugf("Creating dest directory: %s", dst)
	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
		return err
	}
	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return archiver.TarUntar(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
	return defaultArchiver.CopyWithTar(src, dst)
}

// CopyFileWithTar copies the regular file at src into dst by streaming it
// through an in-memory tar archive.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}
	// Clean up the trailing /
	if dst[len(dst)-1] == '/' {
		dst = path.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	r, w := io.Pipe()
	errC := promise.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	defer func() {
		if er := <-errC; er != nil {
			err = er
		}
	}()
	return archiver.Untar(r, filepath.Dir(dst), nil)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
	return defaultArchiver.CopyFileWithTar(src, dst)
}

// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	if input != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return nil, err
		}
		// Write stdin if any
		go func() {
			io.Copy(stdin, input)
			stdin.Close()
		}()
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr, we will use it in case of an error
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}

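// A minimal usage sketch (editor's illustration, not part of the original
// source): this mirrors how xzDecompress above shells out to xz and streams
// the result back.
//
//	out, err := CmdStream(exec.Command("xz", "-d", "-c", "-q"), compressedReader)
//	if err != nil {
//		// handle error
//	}
//	defer out.Close()
//	// read the decompressed bytes from out
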
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if err = f.Sync(); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}

// TempArchive is an archive backed by a temporary file that removes itself
// once it has been fully read.
type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

// Read reads from the underlying file and, once the whole archive has been
// consumed (or an error occurs), closes and removes the backing file.
func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
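
// A minimal usage sketch (editor's illustration, not part of the original
// source): buffer an archive stream on disk so its size is known and it can
// be read back once. The directory and destination writer are hypothetical.
//
//	tmp, err := NewTempArchive(rc, "/var/tmp")
//	if err != nil {
//		// handle error
//	}
//	logrus.Debugf("buffered %d bytes", tmp.Size)
//	io.Copy(dst, tmp) // the backing file is removed after the last byte is read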