github.com/ld86/docker@v1.7.1-rc3/builder/internals.go

     1  package builder
     2  
     3  // internals for handling commands. Covers many areas and a lot of
     4  // non-contiguous functionality. Please read the comments.
     5  
     6  import (
     7  	"crypto/sha256"
     8  	"encoding/hex"
     9  	"fmt"
    10  	"io"
    11  	"io/ioutil"
    12  	"net/http"
    13  	"net/url"
    14  	"os"
    15  	"path"
    16  	"path/filepath"
    17  	"sort"
    18  	"strings"
    19  	"syscall"
    20  	"time"
    21  
    22  	"github.com/Sirupsen/logrus"
    23  	"github.com/docker/docker/builder/parser"
    24  	"github.com/docker/docker/cliconfig"
    25  	"github.com/docker/docker/daemon"
    26  	"github.com/docker/docker/graph"
    27  	imagepkg "github.com/docker/docker/image"
    28  	"github.com/docker/docker/pkg/archive"
    29  	"github.com/docker/docker/pkg/chrootarchive"
    30  	"github.com/docker/docker/pkg/httputils"
    31  	"github.com/docker/docker/pkg/ioutils"
    32  	"github.com/docker/docker/pkg/jsonmessage"
    33  	"github.com/docker/docker/pkg/parsers"
    34  	"github.com/docker/docker/pkg/progressreader"
    35  	"github.com/docker/docker/pkg/stringid"
    36  	"github.com/docker/docker/pkg/system"
    37  	"github.com/docker/docker/pkg/tarsum"
    38  	"github.com/docker/docker/pkg/urlutil"
    39  	"github.com/docker/docker/registry"
    40  	"github.com/docker/docker/runconfig"
    41  )
    42  
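        // readContext decompresses the incoming build context, wraps it in a
        // tarsum reader so file checksums can be looked up later, and untars it
        // into a temporary directory recorded in b.contextPath.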
    43  func (b *Builder) readContext(context io.Reader) error {
    44  	tmpdirPath, err := ioutil.TempDir("", "docker-build")
    45  	if err != nil {
    46  		return err
    47  	}
    48  
    49  	decompressedStream, err := archive.DecompressStream(context)
    50  	if err != nil {
    51  		return err
    52  	}
    53  
    54  	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
    55  		return err
    56  	}
    57  
    58  	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
    59  		return err
    60  	}
    61  
    62  	b.contextPath = tmpdirPath
    63  	return nil
    64  }
    65  
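        // commit records the current builder state as a new image. If id is empty,
        // a throwaway container is created (unless the build cache already holds a
        // matching image) and committed with a #(nop) comment command; otherwise
        // the container with the given id is committed. The resulting image ID
        // becomes the new b.image.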
    66  func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
    67  	if b.disableCommit {
    68  		return nil
    69  	}
    70  	if b.image == "" && !b.noBaseImage {
    71  		return fmt.Errorf("Please provide a source image with `from` prior to commit")
    72  	}
    73  	b.Config.Image = b.image
    74  	if id == "" {
    75  		cmd := b.Config.Cmd
    76  		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
    77  		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)
    78  
    79  		hit, err := b.probeCache()
    80  		if err != nil {
    81  			return err
    82  		}
    83  		if hit {
    84  			return nil
    85  		}
    86  
    87  		container, err := b.create()
    88  		if err != nil {
    89  			return err
    90  		}
    91  		id = container.ID
    92  
    93  		if err := container.Mount(); err != nil {
    94  			return err
    95  		}
    96  		defer container.Unmount()
    97  	}
    98  	container, err := b.Daemon.Get(id)
    99  	if err != nil {
   100  		return err
   101  	}
   102  
   103  	// Note: Actually copy the struct
   104  	autoConfig := *b.Config
   105  	autoConfig.Cmd = autoCmd
   106  
   107  	// Commit the container
   108  	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
   109  	if err != nil {
   110  		return err
   111  	}
   112  	b.image = image.ID
   113  	return nil
   114  }
   115  
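        // copyInfo describes a single source for an ADD or COPY instruction: where
        // it comes from, where it should land, the hash used for cache look-ups,
        // whether it may be decompressed, and any temporary directory to clean up.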
   116  type copyInfo struct {
   117  	origPath   string
   118  	destPath   string
   119  	hash       string
   120  	decompress bool
   121  	tmpDir     string
   122  }
   123  
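        // runContextCommand implements ADD and COPY. It resolves every source
        // argument into copyInfos, builds a cache key from their hashes, and, on a
        // cache miss, creates a temporary container, copies the sources into it,
        // and commits the result.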
   124  func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
   125  	if b.context == nil {
   126  		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
   127  	}
   128  
   129  	if len(args) < 2 {
   130  		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
   131  	}
   132  
   133  	dest := args[len(args)-1] // last one is always the dest
   134  
   135  	copyInfos := []*copyInfo{}
   136  
   137  	b.Config.Image = b.image
   138  
   139  	defer func() {
   140  		for _, ci := range copyInfos {
   141  			if ci.tmpDir != "" {
   142  				os.RemoveAll(ci.tmpDir)
   143  			}
   144  		}
   145  	}()
   146  
   147  	// Loop through each src file and calculate the info we need to
   148  	// do the copy (e.g. hash value if cached).  Don't actually do
   149  	// the copy until we've looked at all src files
   150  	for _, orig := range args[0 : len(args)-1] {
   151  		if err := calcCopyInfo(
   152  			b,
   153  			cmdName,
   154  			&copyInfos,
   155  			orig,
   156  			dest,
   157  			allowRemote,
   158  			allowDecompression,
   159  			true,
   160  		); err != nil {
   161  			return err
   162  		}
   163  	}
   164  
   165  	if len(copyInfos) == 0 {
   166  		return fmt.Errorf("No source files were specified")
   167  	}
   168  
   169  	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
   170  		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
   171  	}
   172  
   173  	// For backwards compat, if there's just one CI then use it as the
   174  	// cache look-up string, otherwise hash 'em all into one
   175  	var srcHash string
   176  	var origPaths string
   177  
   178  	if len(copyInfos) == 1 {
   179  		srcHash = copyInfos[0].hash
   180  		origPaths = copyInfos[0].origPath
   181  	} else {
   182  		var hashes []string
   183  		var origs []string
   184  		for _, ci := range copyInfos {
   185  			hashes = append(hashes, ci.hash)
   186  			origs = append(origs, ci.origPath)
   187  		}
   188  		hasher := sha256.New()
   189  		hasher.Write([]byte(strings.Join(hashes, ",")))
   190  		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
   191  		origPaths = strings.Join(origs, " ")
   192  	}
   193  
   194  	cmd := b.Config.Cmd
   195  	b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
   196  	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)
   197  
   198  	hit, err := b.probeCache()
   199  	if err != nil {
   200  		return err
   201  	}
   202  
   203  	if hit {
   204  		return nil
   205  	}
   206  
   207  	container, _, err := b.Daemon.Create(b.Config, nil, "")
   208  	if err != nil {
   209  		return err
   210  	}
   211  	b.TmpContainers[container.ID] = struct{}{}
   212  
   213  	if err := container.Mount(); err != nil {
   214  		return err
   215  	}
   216  	defer container.Unmount()
   217  
   218  	for _, ci := range copyInfos {
   219  		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
   220  			return err
   221  		}
   222  	}
   223  
   224  	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
   225  		return err
   226  	}
   227  	return nil
   228  }
   229  
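        // calcCopyInfo resolves a single source argument into one or more copyInfo
        // entries. Remote URLs are downloaded into a temporary directory inside the
        // context, wildcards are expanded against the context's tarsum entries, and
        // plain files or directories get a content hash for cache look-ups.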
   230  func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
   231  
   232  	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
   233  		origPath = origPath[1:]
   234  	}
   235  	origPath = strings.TrimPrefix(origPath, "./")
   236  
   237  	// Twiddle the destPath when it's a relative path - meaning, make it
   238  	// relative to the WORKDIR
   239  	if !filepath.IsAbs(destPath) {
   240  		hasSlash := strings.HasSuffix(destPath, "/")
   241  		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)
   242  
   243  		// Make sure we preserve any trailing slash
   244  		if hasSlash {
   245  			destPath += "/"
   246  		}
   247  	}
   248  
   249  	// In the remote/URL case, download it and gen its hashcode
   250  	if urlutil.IsURL(origPath) {
   251  		if !allowRemote {
   252  			return fmt.Errorf("Source can't be a URL for %s", cmdName)
   253  		}
   254  
   255  		ci := copyInfo{}
   256  		ci.origPath = origPath
   257  		ci.hash = origPath // default to this but can change
   258  		ci.destPath = destPath
   259  		ci.decompress = false
   260  		*cInfos = append(*cInfos, &ci)
   261  
   262  		// Initiate the download
   263  		resp, err := httputils.Download(ci.origPath)
   264  		if err != nil {
   265  			return err
   266  		}
   267  
   268  		// Create a tmp dir
   269  		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
   270  		if err != nil {
   271  			return err
   272  		}
   273  		ci.tmpDir = tmpDirName
   274  
   275  		// Create a tmp file within our tmp dir
   276  		tmpFileName := path.Join(tmpDirName, "tmp")
   277  		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
   278  		if err != nil {
   279  			return err
   280  		}
   281  
   282  		// Download and dump result to tmp file
   283  		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
   284  			In:        resp.Body,
   285  			Out:       b.OutOld,
   286  			Formatter: b.StreamFormatter,
   287  			Size:      int(resp.ContentLength),
   288  			NewLines:  true,
   289  			ID:        "",
   290  			Action:    "Downloading",
   291  		})); err != nil {
   292  			tmpFile.Close()
   293  			return err
   294  		}
   295  		fmt.Fprintf(b.OutStream, "\n")
   296  		tmpFile.Close()
   297  
   298  		// Set the mtime to the Last-Modified header value if present.
   299  		// Otherwise just reset atime and mtime to zero.
   300  		times := make([]syscall.Timespec, 2)
   301  
   302  		lastMod := resp.Header.Get("Last-Modified")
   303  		if lastMod != "" {
   304  			mTime, err := http.ParseTime(lastMod)
   305  			// If we can't parse it then just let it default to 'zero'
   306  			// otherwise use the parsed time value
   307  			if err == nil {
   308  				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
   309  			}
   310  		}
   311  
   312  		if err := system.UtimesNano(tmpFileName, times); err != nil {
   313  			return err
   314  		}
   315  
   316  		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
   317  
   318  		// If the destination is a directory, figure out the filename.
   319  		if strings.HasSuffix(ci.destPath, "/") {
   320  			u, err := url.Parse(origPath)
   321  			if err != nil {
   322  				return err
   323  			}
   324  			urlPath := u.Path
   325  			if strings.HasSuffix(urlPath, "/") {
   326  				urlPath = urlPath[:len(urlPath)-1]
   327  			}
   328  			parts := strings.Split(urlPath, "/")
   329  			filename := parts[len(parts)-1]
   330  			if filename == "" {
   331  				return fmt.Errorf("cannot determine filename from url: %s", u)
   332  			}
   333  			ci.destPath = ci.destPath + filename
   334  		}
   335  
   336  		// Calc the checksum, even if we're using the cache
   337  		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
   338  		if err != nil {
   339  			return err
   340  		}
   341  		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
   342  		if err != nil {
   343  			return err
   344  		}
   345  		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
   346  			return err
   347  		}
   348  		ci.hash = tarSum.Sum(nil)
   349  		r.Close()
   350  
   351  		return nil
   352  	}
   353  
   354  	// Deal with wildcards
   355  	if allowWildcards && ContainsWildcards(origPath) {
   356  		for _, fileInfo := range b.context.GetSums() {
   357  			if fileInfo.Name() == "" {
   358  				continue
   359  			}
   360  			match, _ := path.Match(origPath, fileInfo.Name())
   361  			if !match {
   362  				continue
   363  			}
   364  
   365  			// Note we set allowWildcards to false in case the name has
   366  			// a * in it
   367  			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
        				return err
        			}
   368  		}
   369  		return nil
   370  	}
   371  
   372  	// Must be a dir or a file
   373  
   374  	if err := b.checkPathForAddition(origPath); err != nil {
   375  		return err
   376  	}
   377  	fi, err := os.Stat(path.Join(b.contextPath, origPath))
        	if err != nil {
        		return err
        	}
   378  
   379  	ci := copyInfo{}
   380  	ci.origPath = origPath
   381  	ci.hash = origPath
   382  	ci.destPath = destPath
   383  	ci.decompress = allowDecompression
   384  	*cInfos = append(*cInfos, &ci)
   385  
   386  	// Deal with the single file case
   387  	if !fi.IsDir() {
   388  		// This will match the first file in the sums of the archive
   389  		fis := b.context.GetSums().GetFile(ci.origPath)
   390  		if fis != nil {
   391  			ci.hash = "file:" + fis.Sum()
   392  		}
   393  		return nil
   394  	}
   395  
   396  	// Must be a dir
   397  	var subfiles []string
   398  	absOrigPath := path.Join(b.contextPath, ci.origPath)
   399  
   400  	// Add a trailing / to make sure we only pick up nested files under
   401  	// the dir and not sibling files of the dir that just happen to
   402  	// start with the same chars
   403  	if !strings.HasSuffix(absOrigPath, "/") {
   404  		absOrigPath += "/"
   405  	}
   406  
   407  	// We also need the path without the trailing / to match (and skip) the dir entry itself
   408  	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
   409  
   410  	for _, fileInfo := range b.context.GetSums() {
   411  		absFile := path.Join(b.contextPath, fileInfo.Name())
   412  		// Any file in the context that starts with the given path will be
   413  		// picked up and its hashcode used.  However, we'll exclude the
   414  		// root dir itself.  We do this for a couple of reasons:
   415  		// 1 - ADD/COPY will not copy the dir itself, just its children
   416  		//     so there's no reason to include it in the hash calc
   417  		// 2 - the metadata on the dir will change when any child file
   418  		//     changes.  This will lead to a miss in the cache check if that
   419  		//     child file is in the .dockerignore list.
   420  		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
   421  			subfiles = append(subfiles, fileInfo.Sum())
   422  		}
   423  	}
   424  	sort.Strings(subfiles)
   425  	hasher := sha256.New()
   426  	hasher.Write([]byte(strings.Join(subfiles, ",")))
   427  	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
   428  
   429  	return nil
   430  }
   431  
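        // ContainsWildcards reports whether name contains an unescaped wildcard
        // character (*, ? or [) as understood by path.Match.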
   432  func ContainsWildcards(name string) bool {
   433  	for i := 0; i < len(name); i++ {
   434  		ch := name[i]
   435  		if ch == '\\' {
   436  			i++
   437  		} else if ch == '*' || ch == '?' || ch == '[' {
   438  			return true
   439  		}
   440  	}
   441  	return false
   442  }
   443  
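        // pullImage pulls the named image (defaulting to the "latest" tag),
        // resolving registry credentials from any auth configs supplied with the
        // build, and returns the pulled image.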
   444  func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
   445  	remote, tag := parsers.ParseRepositoryTag(name)
   446  	if tag == "" {
   447  		tag = "latest"
   448  	}
   449  
   450  	pullRegistryAuth := &cliconfig.AuthConfig{}
   451  	if len(b.AuthConfigs) > 0 {
   452  		// The request came with a full auth config file; we prefer to use that
   453  		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
   454  		if err != nil {
   455  			return nil, err
   456  		}
   457  
   458  		resolvedConfig := registry.ResolveAuthConfig(
   459  			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
   460  			repoInfo.Index,
   461  		)
   462  		pullRegistryAuth = &resolvedConfig
   463  	}
   464  
   465  	imagePullConfig := &graph.ImagePullConfig{
   466  		AuthConfig: pullRegistryAuth,
   467  		OutStream:  ioutils.NopWriteCloser(b.OutOld),
   468  	}
   469  
   470  	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
   471  		return nil, err
   472  	}
   473  
   474  	image, err := b.Daemon.Repositories().LookupImage(name)
   475  	if err != nil {
   476  		return nil, err
   477  	}
   478  
   479  	return image, nil
   480  }
   481  
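        // processImageFrom makes img the base of the build: it adopts the image's
        // config, ensures a default PATH, and replays the image's ONBUILD triggers
        // through the parser and dispatcher after removing them from the config
        // that will be committed.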
   482  func (b *Builder) processImageFrom(img *imagepkg.Image) error {
   483  	b.image = img.ID
   484  
   485  	if img.Config != nil {
   486  		b.Config = img.Config
   487  	}
   488  
   489  	if len(b.Config.Env) == 0 {
   490  		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
   491  	}
   492  
   493  	// Process ONBUILD triggers if they exist
   494  	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
   495  		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
   496  	}
   497  
   498  	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
   499  	onBuildTriggers := b.Config.OnBuild
   500  	b.Config.OnBuild = []string{}
   501  
   502  	// parse the ONBUILD triggers by invoking the parser
   503  	for stepN, step := range onBuildTriggers {
   504  		ast, err := parser.Parse(strings.NewReader(step))
   505  		if err != nil {
   506  			return err
   507  		}
   508  
   509  		for i, n := range ast.Children {
   510  			switch strings.ToUpper(n.Value) {
   511  			case "ONBUILD":
   512  				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
   513  			case "MAINTAINER", "FROM":
   514  				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
   515  			}
   516  
   517  			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)
   518  
   519  			if err := b.dispatch(i, n); err != nil {
   520  				return err
   521  			}
   522  		}
   523  	}
   524  
   525  	return nil
   526  }
   527  
   528  // probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
   529  // and if so attempts to look up the current `b.image` and `b.Config` pair
   530  // in the current server `b.Daemon`. If an image is found, probeCache returns
   531  // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
   532  // is any error, it returns `(false, err)`.
   533  func (b *Builder) probeCache() (bool, error) {
   534  	if !b.UtilizeCache || b.cacheBusted {
   535  		return false, nil
   536  	}
   537  
   538  	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
   539  	if err != nil {
   540  		return false, err
   541  	}
   542  	if cache == nil {
   543  		logrus.Debugf("[BUILDER] Cache miss")
   544  		b.cacheBusted = true
   545  		return false, nil
   546  	}
   547  
   548  	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
   549  	logrus.Debugf("[BUILDER] Use cached version")
   550  	b.image = cache.ID
   551  	return true, nil
   552  }
   553  
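        // create makes a new temporary build container from the current b.Config
        // and the builder's resource limits, registers it in b.TmpContainers, and
        // primes its Path/Args from the configured command.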
   554  func (b *Builder) create() (*daemon.Container, error) {
   555  	if b.image == "" && !b.noBaseImage {
   556  		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
   557  	}
   558  	b.Config.Image = b.image
   559  
   560  	hostConfig := &runconfig.HostConfig{
   561  		CpuShares:    b.cpuShares,
   562  		CpuPeriod:    b.cpuPeriod,
   563  		CpuQuota:     b.cpuQuota,
   564  		CpusetCpus:   b.cpuSetCpus,
   565  		CpusetMems:   b.cpuSetMems,
   566  		CgroupParent: b.cgroupParent,
   567  		Memory:       b.memory,
   568  		MemorySwap:   b.memorySwap,
   569  		NetworkMode:  "bridge",
   570  	}
   571  
   572  	config := *b.Config
   573  
   574  	// Create the container
   575  	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
   576  	if err != nil {
   577  		return nil, err
   578  	}
   579  	for _, warning := range warnings {
   580  		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
   581  	}
   582  
   583  	b.TmpContainers[c.ID] = struct{}{}
   584  	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))
   585  
   586  	if config.Cmd.Len() > 0 {
   587  		// override the entry point that may have been picked up from the base image
   588  		s := config.Cmd.Slice()
   589  		c.Path = s[0]
   590  		c.Args = s[1:]
   591  	} else {
   592  		config.Cmd = runconfig.NewCommand()
   593  	}
   594  
   595  	return c, nil
   596  }
   597  
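        // run starts the given container, attaching to its output when the build is
        // verbose, kills it if the build is cancelled, and returns a JSONError when
        // the command exits with a non-zero status.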
   598  func (b *Builder) run(c *daemon.Container) error {
   599  	var errCh chan error
   600  	if b.Verbose {
   601  		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
   602  	}
   603  
   604  	// start the container
   605  	if err := c.Start(); err != nil {
   606  		return err
   607  	}
   608  
   609  	finished := make(chan struct{})
   610  	defer close(finished)
   611  	go func() {
   612  		select {
   613  		case <-b.cancelled:
   614  			logrus.Debugln("Build cancelled, killing container:", c.ID)
   615  			c.Kill()
   616  		case <-finished:
   617  		}
   618  	}()
   619  
   620  	if b.Verbose {
   621  		// Block on reading output from container, stop on err or chan closed
   622  		if err := <-errCh; err != nil {
   623  			return err
   624  		}
   625  	}
   626  
   627  	// Wait for it to finish
   628  	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
   629  		return &jsonmessage.JSONError{
   630  			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
   631  			Code:    ret,
   632  		}
   633  	}
   634  
   635  	return nil
   636  }
   637  
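        // checkPathForAddition verifies that orig resolves to an existing path
        // inside the build context; symlinks that escape the context are rejected.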
   638  func (b *Builder) checkPathForAddition(orig string) error {
   639  	origPath := path.Join(b.contextPath, orig)
   640  	origPath, err := filepath.EvalSymlinks(origPath)
   641  	if err != nil {
   642  		if os.IsNotExist(err) {
   643  			return fmt.Errorf("%s: no such file or directory", orig)
   644  		}
   645  		return err
   646  	}
   647  	if !strings.HasPrefix(origPath, b.contextPath) {
   648  		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
   649  	}
   650  	if _, err := os.Stat(origPath); err != nil {
   651  		if os.IsNotExist(err) {
   652  			return fmt.Errorf("%s: no such file or directory", orig)
   653  		}
   654  		return err
   655  	}
   656  	return nil
   657  }
   658  
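        // addContext copies orig from the build context into the container at dest.
        // Directories are copied recursively, archives are unpacked when decompress
        // is true, and plain files are copied as-is; ownership on the copied paths
        // is then fixed up.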
   659  func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
   660  	var (
   661  		err        error
   662  		destExists = true
   663  		origPath   = path.Join(b.contextPath, orig)
   664  		destPath   string
   665  	)
   666  
   667  	destPath, err = container.GetResourcePath(dest)
   668  	if err != nil {
   669  		return err
   670  	}
   671  
   672  	// Preserve the trailing '/'
   673  	if strings.HasSuffix(dest, "/") || dest == "." {
   674  		destPath = destPath + "/"
   675  	}
   676  
   677  	destStat, err := os.Stat(destPath)
   678  	if err != nil {
   679  		if !os.IsNotExist(err) {
   680  			return err
   681  		}
   682  		destExists = false
   683  	}
   684  
   685  	fi, err := os.Stat(origPath)
   686  	if err != nil {
   687  		if os.IsNotExist(err) {
   688  			return fmt.Errorf("%s: no such file or directory", orig)
   689  		}
   690  		return err
   691  	}
   692  
   693  	if fi.IsDir() {
   694  		return copyAsDirectory(origPath, destPath, destExists)
   695  	}
   696  
   697  	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
   698  	if decompress {
   699  		// First try to unpack the source as an archive. To support the untar
   700  		// feature we need to clean up the path a little bit, because tar is very
   701  		// forgiving: we need to strip off the archive's filename from the path,
   702  		// but this is only added if it does not end in /.
   703  		tarDest := destPath
   704  		if strings.HasSuffix(tarDest, "/") {
   705  			tarDest = filepath.Dir(destPath)
   706  		}
   707  
   708  		// try to successfully untar the orig
   709  		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
   710  			return nil
   711  		} else if err != io.EOF {
   712  			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
   713  		}
   714  	}
   715  
   716  	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
   717  		return err
   718  	}
   719  	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
   720  		return err
   721  	}
   722  
   723  	resPath := destPath
   724  	if destExists && destStat.IsDir() {
   725  		resPath = path.Join(destPath, path.Base(origPath))
   726  	}
   727  
   728  	return fixPermissions(origPath, resPath, 0, 0, destExists)
   729  }
   730  
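        // copyAsDirectory copies the directory tree at source to destination and
        // then resets ownership on everything that was copied to root.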
   731  func copyAsDirectory(source, destination string, destExisted bool) error {
   732  	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
   733  		return err
   734  	}
   735  	return fixPermissions(source, destination, 0, 0, destExisted)
   736  }
   737  
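        // fixPermissions walks everything copied from source and Lchowns the
        // corresponding paths under destination to uid:gid, leaving the destination
        // root itself untouched when it already existed as a directory.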
   738  func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
   739  	// If the destination didn't already exist, or the destination isn't a
   740  	// directory, then we should Lchown the destination; otherwise we leave
   741  	// its ownership alone.
   742  	destStat, err := os.Stat(destination)
   743  	if err != nil {
   744  		// This should *never* be reached, because the destination must've already
   745  		// been created while untar-ing the context.
   746  		return err
   747  	}
   748  	doChownDestination := !destExisted || !destStat.IsDir()
   749  
   750  	// We Walk on the source rather than on the destination because we don't
   751  	// want to change permissions on things we haven't created or modified.
   752  	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
   753  		// Do not alter the walk root if it existed before, as it doesn't fall under
   754  		// the domain of "things we should chown".
   755  		if !doChownDestination && (source == fullpath) {
   756  			return nil
   757  		}
   758  
   759  		// Path is prefixed by source: substitute with destination instead.
   760  		cleaned, err := filepath.Rel(source, fullpath)
   761  		if err != nil {
   762  			return err
   763  		}
   764  
   765  		fullpath = path.Join(destination, cleaned)
   766  		return os.Lchown(fullpath, uid, gid)
   767  	})
   768  }
   769  
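        // clearTmp force-removes the temporary containers recorded in
        // b.TmpContainers, reporting each removal on the build output stream and
        // stopping at the first removal error.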
   770  func (b *Builder) clearTmp() {
   771  	for c := range b.TmpContainers {
   772  		rmConfig := &daemon.ContainerRmConfig{
   773  			ForceRemove:  true,
   774  			RemoveVolume: true,
   775  		}
   776  		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
   777  			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
   778  			return
   779  		}
   780  		delete(b.TmpContainers, c)
   781  		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
   782  	}
   783  }