github.com/eikeon/docker@v1.5.0-rc4/builder/internals.go (about)

     1  package builder
     2  
     3  // internals for handling commands. Covers many areas and a lot of
     4  // non-contiguous functionality. Please read the comments.
     5  
     6  import (
     7  	"crypto/sha256"
     8  	"encoding/hex"
     9  	"fmt"
    10  	"io"
    11  	"io/ioutil"
    12  	"net/http"
    13  	"net/url"
    14  	"os"
    15  	"path"
    16  	"path/filepath"
    17  	"sort"
    18  	"strings"
    19  	"syscall"
    20  	"time"
    21  
    22  	log "github.com/Sirupsen/logrus"
    23  	"github.com/docker/docker/builder/parser"
    24  	"github.com/docker/docker/daemon"
    25  	imagepkg "github.com/docker/docker/image"
    26  	"github.com/docker/docker/pkg/archive"
    27  	"github.com/docker/docker/pkg/chrootarchive"
    28  	"github.com/docker/docker/pkg/ioutils"
    29  	"github.com/docker/docker/pkg/parsers"
    30  	"github.com/docker/docker/pkg/symlink"
    31  	"github.com/docker/docker/pkg/system"
    32  	"github.com/docker/docker/pkg/tarsum"
    33  	"github.com/docker/docker/pkg/urlutil"
    34  	"github.com/docker/docker/registry"
    35  	"github.com/docker/docker/utils"
    36  )
    37  
    38  func (b *Builder) readContext(context io.Reader) error {
    39  	tmpdirPath, err := ioutil.TempDir("", "docker-build")
    40  	if err != nil {
    41  		return err
    42  	}
    43  
    44  	decompressedStream, err := archive.DecompressStream(context)
    45  	if err != nil {
    46  		return err
    47  	}
    48  
    49  	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
    50  		return err
    51  	}
    52  
    53  	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
    54  		return err
    55  	}
    56  
    57  	b.contextPath = tmpdirPath
    58  	return nil
    59  }
    60  
    61  func (b *Builder) commit(id string, autoCmd []string, comment string) error {
    62  	if b.image == "" && !b.noBaseImage {
    63  		return fmt.Errorf("Please provide a source image with `from` prior to commit")
    64  	}
    65  	b.Config.Image = b.image
    66  	if id == "" {
    67  		cmd := b.Config.Cmd
    68  		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
    69  		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
    70  
    71  		hit, err := b.probeCache()
    72  		if err != nil {
    73  			return err
    74  		}
    75  		if hit {
    76  			return nil
    77  		}
    78  
    79  		container, err := b.create()
    80  		if err != nil {
    81  			return err
    82  		}
    83  		id = container.ID
    84  
    85  		if err := container.Mount(); err != nil {
    86  			return err
    87  		}
    88  		defer container.Unmount()
    89  	}
    90  	container := b.Daemon.Get(id)
    91  	if container == nil {
    92  		return fmt.Errorf("An error occured while creating the container")
    93  	}
    94  
    95  	// Note: Actually copy the struct
    96  	autoConfig := *b.Config
    97  	autoConfig.Cmd = autoCmd
    98  
    99  	// Commit the container
   100  	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
   101  	if err != nil {
   102  		return err
   103  	}
   104  	b.image = image.ID
   105  	return nil
   106  }
   107  
// copyInfo records everything needed to perform (and cache-key) a single
// ADD/COPY source -> destination transfer.
type copyInfo struct {
	origPath   string // source path, relative to the build context
	destPath   string // absolute destination path inside the container
	hash       string // cache-key component: "file:"/"dir:" tarsum, or the raw URL/path
	decompress bool   // whether an archive source should be auto-untarred
	tmpDir     string // temp dir holding a downloaded remote file; removed by the caller
}
   115  
// runContextCommand implements the shared machinery behind ADD and COPY:
// it resolves every source argument into a copyInfo, probes the build
// cache, and — on a miss — creates a temporary container into which each
// source is copied before committing the result as a new layer.
// allowRemote permits URL sources; allowDecompression permits
// auto-untarring of archive sources; cmdName is used in messages.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	// Remote downloads land in per-copyInfo temp dirs; clean them all up
	// when this command finishes, success or failure.
	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached).  Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
		if err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Swap in a "#(nop)" command describing the copy for the cache probe;
	// the defer restores the user's real command.
	cmd := b.Config.Cmd
	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	// Reuse the cached layer only on a hit with caching enabled.
	// (probeCache already checks b.UtilizeCache, so the second condition
	// here is redundant belt-and-braces.)
	if hit && b.UtilizeCache {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	// Copy each resolved source into the mounted container filesystem.
	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
   213  
   214  func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
   215  
   216  	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
   217  		origPath = origPath[1:]
   218  	}
   219  	origPath = strings.TrimPrefix(origPath, "./")
   220  
   221  	// Twiddle the destPath when its a relative path - meaning, make it
   222  	// relative to the WORKINGDIR
   223  	if !filepath.IsAbs(destPath) {
   224  		hasSlash := strings.HasSuffix(destPath, "/")
   225  		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)
   226  
   227  		// Make sure we preserve any trailing slash
   228  		if hasSlash {
   229  			destPath += "/"
   230  		}
   231  	}
   232  
   233  	// In the remote/URL case, download it and gen its hashcode
   234  	if urlutil.IsURL(origPath) {
   235  		if !allowRemote {
   236  			return fmt.Errorf("Source can't be a URL for %s", cmdName)
   237  		}
   238  
   239  		ci := copyInfo{}
   240  		ci.origPath = origPath
   241  		ci.hash = origPath // default to this but can change
   242  		ci.destPath = destPath
   243  		ci.decompress = false
   244  		*cInfos = append(*cInfos, &ci)
   245  
   246  		// Initiate the download
   247  		resp, err := utils.Download(ci.origPath)
   248  		if err != nil {
   249  			return err
   250  		}
   251  
   252  		// Create a tmp dir
   253  		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
   254  		if err != nil {
   255  			return err
   256  		}
   257  		ci.tmpDir = tmpDirName
   258  
   259  		// Create a tmp file within our tmp dir
   260  		tmpFileName := path.Join(tmpDirName, "tmp")
   261  		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
   262  		if err != nil {
   263  			return err
   264  		}
   265  
   266  		// Download and dump result to tmp file
   267  		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
   268  			tmpFile.Close()
   269  			return err
   270  		}
   271  		fmt.Fprintf(b.OutStream, "\n")
   272  		tmpFile.Close()
   273  
   274  		// Set the mtime to the Last-Modified header value if present
   275  		// Otherwise just remove atime and mtime
   276  		times := make([]syscall.Timespec, 2)
   277  
   278  		lastMod := resp.Header.Get("Last-Modified")
   279  		if lastMod != "" {
   280  			mTime, err := http.ParseTime(lastMod)
   281  			// If we can't parse it then just let it default to 'zero'
   282  			// otherwise use the parsed time value
   283  			if err == nil {
   284  				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
   285  			}
   286  		}
   287  
   288  		if err := system.UtimesNano(tmpFileName, times); err != nil {
   289  			return err
   290  		}
   291  
   292  		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
   293  
   294  		// If the destination is a directory, figure out the filename.
   295  		if strings.HasSuffix(ci.destPath, "/") {
   296  			u, err := url.Parse(origPath)
   297  			if err != nil {
   298  				return err
   299  			}
   300  			path := u.Path
   301  			if strings.HasSuffix(path, "/") {
   302  				path = path[:len(path)-1]
   303  			}
   304  			parts := strings.Split(path, "/")
   305  			filename := parts[len(parts)-1]
   306  			if filename == "" {
   307  				return fmt.Errorf("cannot determine filename from url: %s", u)
   308  			}
   309  			ci.destPath = ci.destPath + filename
   310  		}
   311  
   312  		// Calc the checksum, even if we're using the cache
   313  		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
   314  		if err != nil {
   315  			return err
   316  		}
   317  		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
   318  		if err != nil {
   319  			return err
   320  		}
   321  		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
   322  			return err
   323  		}
   324  		ci.hash = tarSum.Sum(nil)
   325  		r.Close()
   326  
   327  		return nil
   328  	}
   329  
   330  	// Deal with wildcards
   331  	if ContainsWildcards(origPath) {
   332  		for _, fileInfo := range b.context.GetSums() {
   333  			if fileInfo.Name() == "" {
   334  				continue
   335  			}
   336  			match, _ := path.Match(origPath, fileInfo.Name())
   337  			if !match {
   338  				continue
   339  			}
   340  
   341  			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
   342  		}
   343  		return nil
   344  	}
   345  
   346  	// Must be a dir or a file
   347  
   348  	if err := b.checkPathForAddition(origPath); err != nil {
   349  		return err
   350  	}
   351  	fi, _ := os.Stat(path.Join(b.contextPath, origPath))
   352  
   353  	ci := copyInfo{}
   354  	ci.origPath = origPath
   355  	ci.hash = origPath
   356  	ci.destPath = destPath
   357  	ci.decompress = allowDecompression
   358  	*cInfos = append(*cInfos, &ci)
   359  
   360  	// Deal with the single file case
   361  	if !fi.IsDir() {
   362  		// This will match first file in sums of the archive
   363  		fis := b.context.GetSums().GetFile(ci.origPath)
   364  		if fis != nil {
   365  			ci.hash = "file:" + fis.Sum()
   366  		}
   367  		return nil
   368  	}
   369  
   370  	// Must be a dir
   371  	var subfiles []string
   372  	absOrigPath := path.Join(b.contextPath, ci.origPath)
   373  
   374  	// Add a trailing / to make sure we only pick up nested files under
   375  	// the dir and not sibling files of the dir that just happen to
   376  	// start with the same chars
   377  	if !strings.HasSuffix(absOrigPath, "/") {
   378  		absOrigPath += "/"
   379  	}
   380  
   381  	// Need path w/o / too to find matching dir w/o trailing /
   382  	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
   383  
   384  	for _, fileInfo := range b.context.GetSums() {
   385  		absFile := path.Join(b.contextPath, fileInfo.Name())
   386  		// Any file in the context that starts with the given path will be
   387  		// picked up and its hashcode used.  However, we'll exclude the
   388  		// root dir itself.  We do this for a coupel of reasons:
   389  		// 1 - ADD/COPY will not copy the dir itself, just its children
   390  		//     so there's no reason to include it in the hash calc
   391  		// 2 - the metadata on the dir will change when any child file
   392  		//     changes.  This will lead to a miss in the cache check if that
   393  		//     child file is in the .dockerignore list.
   394  		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
   395  			subfiles = append(subfiles, fileInfo.Sum())
   396  		}
   397  	}
   398  	sort.Strings(subfiles)
   399  	hasher := sha256.New()
   400  	hasher.Write([]byte(strings.Join(subfiles, ",")))
   401  	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
   402  
   403  	return nil
   404  }
   405  
   406  func ContainsWildcards(name string) bool {
   407  	for i := 0; i < len(name); i++ {
   408  		ch := name[i]
   409  		if ch == '\\' {
   410  			i++
   411  		} else if ch == '*' || ch == '?' || ch == '[' {
   412  			return true
   413  		}
   414  	}
   415  	return false
   416  }
   417  
   418  func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
   419  	remote, tag := parsers.ParseRepositoryTag(name)
   420  	if tag == "" {
   421  		tag = "latest"
   422  	}
   423  	job := b.Engine.Job("pull", remote, tag)
   424  	pullRegistryAuth := b.AuthConfig
   425  	if len(b.AuthConfigFile.Configs) > 0 {
   426  		// The request came with a full auth config file, we prefer to use that
   427  		repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
   428  		if err != nil {
   429  			return nil, err
   430  		}
   431  		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
   432  		pullRegistryAuth = &resolvedAuth
   433  	}
   434  	job.SetenvBool("json", b.StreamFormatter.Json())
   435  	job.SetenvBool("parallel", true)
   436  	job.SetenvJson("authConfig", pullRegistryAuth)
   437  	job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld))
   438  	if err := job.Run(); err != nil {
   439  		return nil, err
   440  	}
   441  	image, err := b.Daemon.Repositories().LookupImage(name)
   442  	if err != nil {
   443  		return nil, err
   444  	}
   445  
   446  	return image, nil
   447  }
   448  
   449  func (b *Builder) processImageFrom(img *imagepkg.Image) error {
   450  	b.image = img.ID
   451  
   452  	if img.Config != nil {
   453  		b.Config = img.Config
   454  	}
   455  
   456  	if len(b.Config.Env) == 0 {
   457  		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
   458  	}
   459  
   460  	// Process ONBUILD triggers if they exist
   461  	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
   462  		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
   463  	}
   464  
   465  	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
   466  	onBuildTriggers := b.Config.OnBuild
   467  	b.Config.OnBuild = []string{}
   468  
   469  	// parse the ONBUILD triggers by invoking the parser
   470  	for stepN, step := range onBuildTriggers {
   471  		ast, err := parser.Parse(strings.NewReader(step))
   472  		if err != nil {
   473  			return err
   474  		}
   475  
   476  		for i, n := range ast.Children {
   477  			switch strings.ToUpper(n.Value) {
   478  			case "ONBUILD":
   479  				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
   480  			case "MAINTAINER", "FROM":
   481  				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
   482  			}
   483  
   484  			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)
   485  
   486  			if err := b.dispatch(i, n); err != nil {
   487  				return err
   488  			}
   489  		}
   490  	}
   491  
   492  	return nil
   493  }
   494  
   495  // probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
   496  // and if so attempts to look up the current `b.image` and `b.Config` pair
   497  // in the current server `b.Daemon`. If an image is found, probeCache returns
   498  // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
   499  // is any error, it returns `(false, err)`.
   500  func (b *Builder) probeCache() (bool, error) {
   501  	if b.UtilizeCache {
   502  		if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
   503  			return false, err
   504  		} else if cache != nil {
   505  			fmt.Fprintf(b.OutStream, " ---> Using cache\n")
   506  			log.Debugf("[BUILDER] Use cached version")
   507  			b.image = cache.ID
   508  			return true, nil
   509  		} else {
   510  			log.Debugf("[BUILDER] Cache miss")
   511  		}
   512  	}
   513  	return false, nil
   514  }
   515  
   516  func (b *Builder) create() (*daemon.Container, error) {
   517  	if b.image == "" && !b.noBaseImage {
   518  		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
   519  	}
   520  	b.Config.Image = b.image
   521  
   522  	config := *b.Config
   523  
   524  	// Create the container
   525  	c, warnings, err := b.Daemon.Create(b.Config, nil, "")
   526  	if err != nil {
   527  		return nil, err
   528  	}
   529  	for _, warning := range warnings {
   530  		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
   531  	}
   532  
   533  	b.TmpContainers[c.ID] = struct{}{}
   534  	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
   535  
   536  	if len(config.Cmd) > 0 {
   537  		// override the entry point that may have been picked up from the base image
   538  		c.Path = config.Cmd[0]
   539  		c.Args = config.Cmd[1:]
   540  	} else {
   541  		config.Cmd = []string{}
   542  	}
   543  
   544  	return c, nil
   545  }
   546  
   547  func (b *Builder) run(c *daemon.Container) error {
   548  	//start the container
   549  	if err := c.Start(); err != nil {
   550  		return err
   551  	}
   552  
   553  	if b.Verbose {
   554  		logsJob := b.Engine.Job("logs", c.ID)
   555  		logsJob.Setenv("follow", "1")
   556  		logsJob.Setenv("stdout", "1")
   557  		logsJob.Setenv("stderr", "1")
   558  		logsJob.Stdout.Add(b.OutStream)
   559  		logsJob.Stderr.Set(b.ErrStream)
   560  		if err := logsJob.Run(); err != nil {
   561  			return err
   562  		}
   563  	}
   564  
   565  	// Wait for it to finish
   566  	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
   567  		err := &utils.JSONError{
   568  			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
   569  			Code:    ret,
   570  		}
   571  		return err
   572  	}
   573  
   574  	return nil
   575  }
   576  
   577  func (b *Builder) checkPathForAddition(orig string) error {
   578  	origPath := path.Join(b.contextPath, orig)
   579  	origPath, err := filepath.EvalSymlinks(origPath)
   580  	if err != nil {
   581  		if os.IsNotExist(err) {
   582  			return fmt.Errorf("%s: no such file or directory", orig)
   583  		}
   584  		return err
   585  	}
   586  	if !strings.HasPrefix(origPath, b.contextPath) {
   587  		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
   588  	}
   589  	if _, err := os.Stat(origPath); err != nil {
   590  		if os.IsNotExist(err) {
   591  			return fmt.Errorf("%s: no such file or directory", orig)
   592  		}
   593  		return err
   594  	}
   595  	return nil
   596  }
   597  
// addContext copies orig (a path inside the build context) into the
// container's root filesystem at dest. Directories are copied as trees;
// files may be auto-extracted as archives when decompress is true;
// afterwards ownership on whatever was created is reset to root:root.
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	// Resolve symlinks within the container rootfs so a crafted dest
	// cannot escape it.
	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	// Remember whether the destination pre-existed; that decides both the
	// final copy target and whether fixPermissions may chown its root.
	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving.  First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			// Not a usable archive: log and fall through to a plain copy.
			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	// If the destination was an existing directory, the file landed inside
	// it; adjust the path whose ownership we fix accordingly.
	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
   671  
   672  func copyAsDirectory(source, destination string, destExisted bool) error {
   673  	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
   674  		return err
   675  	}
   676  	return fixPermissions(source, destination, 0, 0, destExisted)
   677  }
   678  
   679  func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
   680  	// If the destination didn't already exist, or the destination isn't a
   681  	// directory, then we should Lchown the destination. Otherwise, we shouldn't
   682  	// Lchown the destination.
   683  	destStat, err := os.Stat(destination)
   684  	if err != nil {
   685  		// This should *never* be reached, because the destination must've already
   686  		// been created while untar-ing the context.
   687  		return err
   688  	}
   689  	doChownDestination := !destExisted || !destStat.IsDir()
   690  
   691  	// We Walk on the source rather than on the destination because we don't
   692  	// want to change permissions on things we haven't created or modified.
   693  	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
   694  		// Do not alter the walk root iff. it existed before, as it doesn't fall under
   695  		// the domain of "things we should chown".
   696  		if !doChownDestination && (source == fullpath) {
   697  			return nil
   698  		}
   699  
   700  		// Path is prefixed by source: substitute with destination instead.
   701  		cleaned, err := filepath.Rel(source, fullpath)
   702  		if err != nil {
   703  			return err
   704  		}
   705  
   706  		fullpath = path.Join(destination, cleaned)
   707  		return os.Lchown(fullpath, uid, gid)
   708  	})
   709  }
   710  
   711  func (b *Builder) clearTmp() {
   712  	for c := range b.TmpContainers {
   713  		tmp := b.Daemon.Get(c)
   714  		if err := b.Daemon.Destroy(tmp); err != nil {
   715  			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
   716  			return
   717  		}
   718  		b.Daemon.DeleteVolumes(tmp.VolumePaths())
   719  		delete(b.TmpContainers, c)
   720  		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
   721  	}
   722  }