github.com/narayandesai/docker@v1.6.0-rc5/builder/internals.go

     1  package builder
     2  
     3  // internals for handling commands. Covers many areas and a lot of
     4  // non-contiguous functionality. Please read the comments.
     5  
     6  import (
     7  	"crypto/sha256"
     8  	"encoding/hex"
     9  	"fmt"
    10  	"io"
    11  	"io/ioutil"
    12  	"net/http"
    13  	"net/url"
    14  	"os"
    15  	"path"
    16  	"path/filepath"
    17  	"sort"
    18  	"strings"
    19  	"syscall"
    20  	"time"
    21  
    22  	log "github.com/Sirupsen/logrus"
    23  	"github.com/docker/docker/builder/parser"
    24  	"github.com/docker/docker/daemon"
    25  	imagepkg "github.com/docker/docker/image"
    26  	"github.com/docker/docker/pkg/archive"
    27  	"github.com/docker/docker/pkg/chrootarchive"
    28  	"github.com/docker/docker/pkg/common"
    29  	"github.com/docker/docker/pkg/ioutils"
    30  	"github.com/docker/docker/pkg/parsers"
    31  	"github.com/docker/docker/pkg/progressreader"
    32  	"github.com/docker/docker/pkg/symlink"
    33  	"github.com/docker/docker/pkg/system"
    34  	"github.com/docker/docker/pkg/tarsum"
    35  	"github.com/docker/docker/pkg/urlutil"
    36  	"github.com/docker/docker/registry"
    37  	"github.com/docker/docker/runconfig"
    38  	"github.com/docker/docker/utils"
    39  )
    40  
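        // readContext decompresses the build context tar stream, wraps it in a
        // tarsum so per-file checksums can be looked up later, and unpacks it into
        // a fresh temporary directory that is recorded as b.contextPath.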
    41  func (b *Builder) readContext(context io.Reader) error {
    42  	tmpdirPath, err := ioutil.TempDir("", "docker-build")
    43  	if err != nil {
    44  		return err
    45  	}
    46  
    47  	decompressedStream, err := archive.DecompressStream(context)
    48  	if err != nil {
    49  		return err
    50  	}
    51  
    52  	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
    53  		return err
    54  	}
    55  
    56  	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
    57  		return err
    58  	}
    59  
    60  	b.contextPath = tmpdirPath
    61  	return nil
    62  }
    63  
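        // commit records the current b.Config on top of b.image as a new image
        // layer and advances b.image to the result. When id is empty, an
        // intermediate container is created (or satisfied from the build cache)
        // solely to snapshot the configuration, and the comment is recorded as a
        // "#(nop)" command in the image history. Typical use by a Dockerfile step
        // handler (illustrative):
        //
        //	if err := b.commit("", b.Config.Cmd, "ENV "+env); err != nil {
        //		return err
        //	}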
    64  func (b *Builder) commit(id string, autoCmd []string, comment string) error {
    65  	if b.disableCommit {
    66  		return nil
    67  	}
    68  	if b.image == "" && !b.noBaseImage {
    69  		return fmt.Errorf("Please provide a source image with `from` prior to commit")
    70  	}
    71  	b.Config.Image = b.image
    72  	if id == "" {
    73  		cmd := b.Config.Cmd
    74  		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
    75  		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
    76  
    77  		hit, err := b.probeCache()
    78  		if err != nil {
    79  			return err
    80  		}
    81  		if hit {
    82  			return nil
    83  		}
    84  
    85  		container, err := b.create()
    86  		if err != nil {
    87  			return err
    88  		}
    89  		id = container.ID
    90  
    91  		if err := container.Mount(); err != nil {
    92  			return err
    93  		}
    94  		defer container.Unmount()
    95  	}
    96  	container, err := b.Daemon.Get(id)
    97  	if err != nil {
    98  		return err
    99  	}
   100  
   101  	// Note: Actually copy the struct
   102  	autoConfig := *b.Config
   103  	autoConfig.Cmd = autoCmd
   104  
   105  	// Commit the container
   106  	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
   107  	if err != nil {
   108  		return err
   109  	}
   110  	b.image = image.ID
   111  	return nil
   112  }
   113  
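        // copyInfo describes a single source of an ADD or COPY instruction: where
        // it comes from, where it should land, the hash used for build-cache
        // lookups, whether archives should be decompressed, and any temporary
        // download directory that must be cleaned up afterwards.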
   114  type copyInfo struct {
   115  	origPath   string
   116  	destPath   string
   117  	hash       string
   118  	decompress bool
   119  	tmpDir     string
   120  }
   121  
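        // runContextCommand is the shared implementation of ADD and COPY. It
        // gathers a copyInfo for every source argument, derives a single cache key
        // from their hashes, and on a cache miss creates a temporary container into
        // which each source is copied before committing the result as a new layer.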
   122  func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
   123  	if b.context == nil {
   124  		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
   125  	}
   126  
   127  	if len(args) < 2 {
   128  		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
   129  	}
   130  
   131  	dest := args[len(args)-1] // last one is always the dest
   132  
   133  	copyInfos := []*copyInfo{}
   134  
   135  	b.Config.Image = b.image
   136  
   137  	defer func() {
   138  		for _, ci := range copyInfos {
   139  			if ci.tmpDir != "" {
   140  				os.RemoveAll(ci.tmpDir)
   141  			}
   142  		}
   143  	}()
   144  
   145  	// Loop through each src file and calculate the info we need to
   146  	// do the copy (e.g. hash value if cached).  Don't actually do
   147  	// the copy until we've looked at all src files
   148  	for _, orig := range args[0 : len(args)-1] {
   149  		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
   150  		if err != nil {
   151  			return err
   152  		}
   153  	}
   154  
   155  	if len(copyInfos) == 0 {
   156  		return fmt.Errorf("No source files were specified")
   157  	}
   158  
   159  	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
   160  		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
   161  	}
   162  
   163  	// For backwards compat, if there's just one CI then use it as the
   164  	// cache look-up string, otherwise hash 'em all into one
   165  	var srcHash string
   166  	var origPaths string
   167  
   168  	if len(copyInfos) == 1 {
   169  		srcHash = copyInfos[0].hash
   170  		origPaths = copyInfos[0].origPath
   171  	} else {
   172  		var hashs []string
   173  		var origs []string
   174  		for _, ci := range copyInfos {
   175  			hashs = append(hashs, ci.hash)
   176  			origs = append(origs, ci.origPath)
   177  		}
   178  		hasher := sha256.New()
   179  		hasher.Write([]byte(strings.Join(hashs, ",")))
   180  		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
   181  		origPaths = strings.Join(origs, " ")
   182  	}
   183  
   184  	cmd := b.Config.Cmd
   185  	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
   186  	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
   187  
   188  	hit, err := b.probeCache()
   189  	if err != nil {
   190  		return err
   191  	}
   192  
   193  	if hit {
   194  		return nil
   195  	}
   196  
   197  	container, _, err := b.Daemon.Create(b.Config, nil, "")
   198  	if err != nil {
   199  		return err
   200  	}
   201  	b.TmpContainers[container.ID] = struct{}{}
   202  
   203  	if err := container.Mount(); err != nil {
   204  		return err
   205  	}
   206  	defer container.Unmount()
   207  
   208  	for _, ci := range copyInfos {
   209  		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
   210  			return err
   211  		}
   212  	}
   213  
   214  	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
   215  		return err
   216  	}
   217  	return nil
   218  }
   219  
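        // calcCopyInfo appends one or more copyInfo entries for origPath to cInfos.
        // Remote URLs are downloaded into a temp dir inside the context, wildcards
        // are expanded against the context's tarsum entries, single files reuse the
        // checksum recorded for them, and directories get a hash computed over the
        // sums of every file nested beneath them.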
   220  func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
   221  
   222  	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
   223  		origPath = origPath[1:]
   224  	}
   225  	origPath = strings.TrimPrefix(origPath, "./")
   226  
   227  	// Twiddle the destPath when it's a relative path - meaning, make it
   228  	// relative to the WORKDIR
   229  	if !filepath.IsAbs(destPath) {
   230  		hasSlash := strings.HasSuffix(destPath, "/")
   231  		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)
   232  
   233  		// Make sure we preserve any trailing slash
   234  		if hasSlash {
   235  			destPath += "/"
   236  		}
   237  	}
   238  
   239  	// In the remote/URL case, download it and generate its hashcode
   240  	if urlutil.IsURL(origPath) {
   241  		if !allowRemote {
   242  			return fmt.Errorf("Source can't be a URL for %s", cmdName)
   243  		}
   244  
   245  		ci := copyInfo{}
   246  		ci.origPath = origPath
   247  		ci.hash = origPath // default to this but can change
   248  		ci.destPath = destPath
   249  		ci.decompress = false
   250  		*cInfos = append(*cInfos, &ci)
   251  
   252  		// Initiate the download
   253  		resp, err := utils.Download(ci.origPath)
   254  		if err != nil {
   255  			return err
   256  		}
   257  
   258  		// Create a tmp dir
   259  		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
   260  		if err != nil {
   261  			return err
   262  		}
   263  		ci.tmpDir = tmpDirName
   264  
   265  		// Create a tmp file within our tmp dir
   266  		tmpFileName := path.Join(tmpDirName, "tmp")
   267  		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
   268  		if err != nil {
   269  			return err
   270  		}
   271  
   272  		// Download and dump result to tmp file
   273  		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
   274  			In:        resp.Body,
   275  			Out:       b.OutOld,
   276  			Formatter: b.StreamFormatter,
   277  			Size:      int(resp.ContentLength),
   278  			NewLines:  true,
   279  			ID:        "",
   280  			Action:    "Downloading",
   281  		})); err != nil {
   282  			tmpFile.Close()
   283  			return err
   284  		}
   285  		fmt.Fprintf(b.OutStream, "\n")
   286  		tmpFile.Close()
   287  
   288  		// Set the mtime to the Last-Modified header value if present
   289  		// Otherwise just remove atime and mtime
   290  		times := make([]syscall.Timespec, 2)
   291  
   292  		lastMod := resp.Header.Get("Last-Modified")
   293  		if lastMod != "" {
   294  			mTime, err := http.ParseTime(lastMod)
   295  			// If we can't parse it then just let it default to 'zero';
   296  			// otherwise use the parsed time value
   297  			if err == nil {
   298  				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
   299  			}
   300  		}
   301  
   302  		if err := system.UtimesNano(tmpFileName, times); err != nil {
   303  			return err
   304  		}
   305  
   306  		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
   307  
   308  		// If the destination is a directory, figure out the filename.
   309  		if strings.HasSuffix(ci.destPath, "/") {
   310  			u, err := url.Parse(origPath)
   311  			if err != nil {
   312  				return err
   313  			}
   314  			path := u.Path
   315  			if strings.HasSuffix(path, "/") {
   316  				path = path[:len(path)-1]
   317  			}
   318  			parts := strings.Split(path, "/")
   319  			filename := parts[len(parts)-1]
   320  			if filename == "" {
   321  				return fmt.Errorf("cannot determine filename from url: %s", u)
   322  			}
   323  			ci.destPath = ci.destPath + filename
   324  		}
   325  
   326  		// Calc the checksum, even if we're using the cache
   327  		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
   328  		if err != nil {
   329  			return err
   330  		}
   331  		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
   332  		if err != nil {
   333  			return err
   334  		}
   335  		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
   336  			return err
   337  		}
   338  		ci.hash = tarSum.Sum(nil)
   339  		r.Close()
   340  
   341  		return nil
   342  	}
   343  
   344  	// Deal with wildcards
   345  	if ContainsWildcards(origPath) {
   346  		for _, fileInfo := range b.context.GetSums() {
   347  			if fileInfo.Name() == "" {
   348  				continue
   349  			}
   350  			match, _ := path.Match(origPath, fileInfo.Name())
   351  			if !match {
   352  				continue
   353  			}
   354  
   355  			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression); err != nil {
        				return err
        			}
   356  		}
   357  		return nil
   358  	}
   359  
   360  	// Must be a dir or a file
   361  
   362  	if err := b.checkPathForAddition(origPath); err != nil {
   363  		return err
   364  	}
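        	// checkPathForAddition has just verified that the path exists inside
        	// the context, so the error from Stat is ignored here.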
   365  	fi, _ := os.Stat(path.Join(b.contextPath, origPath))
   366  
   367  	ci := copyInfo{}
   368  	ci.origPath = origPath
   369  	ci.hash = origPath
   370  	ci.destPath = destPath
   371  	ci.decompress = allowDecompression
   372  	*cInfos = append(*cInfos, &ci)
   373  
   374  	// Deal with the single file case
   375  	if !fi.IsDir() {
   376  		// This will match the first file in the sums of the archive
   377  		fis := b.context.GetSums().GetFile(ci.origPath)
   378  		if fis != nil {
   379  			ci.hash = "file:" + fis.Sum()
   380  		}
   381  		return nil
   382  	}
   383  
   384  	// Must be a dir
   385  	var subfiles []string
   386  	absOrigPath := path.Join(b.contextPath, ci.origPath)
   387  
   388  	// Add a trailing / to make sure we only pick up nested files under
   389  	// the dir and not sibling files of the dir that just happen to
   390  	// start with the same chars
   391  	if !strings.HasSuffix(absOrigPath, "/") {
   392  		absOrigPath += "/"
   393  	}
   394  
   395  	// We also need the path without the trailing / so we can match the dir entry itself
   396  	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
   397  
   398  	for _, fileInfo := range b.context.GetSums() {
   399  		absFile := path.Join(b.contextPath, fileInfo.Name())
   400  		// Any file in the context that starts with the given path will be
   401  		// picked up and its hashcode used.  However, we'll exclude the
   402  		// root dir itself.  We do this for a couple of reasons:
   403  		// 1 - ADD/COPY will not copy the dir itself, just its children
   404  		//     so there's no reason to include it in the hash calc
   405  		// 2 - the metadata on the dir will change when any child file
   406  		//     changes.  This will lead to a miss in the cache check if that
   407  		//     child file is in the .dockerignore list.
   408  		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
   409  			subfiles = append(subfiles, fileInfo.Sum())
   410  		}
   411  	}
   412  	sort.Strings(subfiles)
   413  	hasher := sha256.New()
   414  	hasher.Write([]byte(strings.Join(subfiles, ",")))
   415  	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
   416  
   417  	return nil
   418  }
   419  
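        // ContainsWildcards reports whether name contains an unescaped path.Match
        // wildcard ('*', '?' or '['); a backslash escapes the following character,
        // so e.g. ContainsWildcards("foo*.txt") is true while
        // ContainsWildcards(`foo\*.txt`) is false.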
   420  func ContainsWildcards(name string) bool {
   421  	for i := 0; i < len(name); i++ {
   422  		ch := name[i]
   423  		if ch == '\\' {
   424  			i++
   425  		} else if ch == '*' || ch == '?' || ch == '[' {
   426  			return true
   427  		}
   428  	}
   429  	return false
   430  }
   431  
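        // pullImage pulls the named image (defaulting to the "latest" tag) via the
        // engine's "pull" job, resolving registry credentials from the builder's
        // auth configuration, and returns the image from the local repository store.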
   432  func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
   433  	remote, tag := parsers.ParseRepositoryTag(name)
   434  	if tag == "" {
   435  		tag = "latest"
   436  	}
   437  	job := b.Engine.Job("pull", remote, tag)
   438  	pullRegistryAuth := b.AuthConfig
   439  	if len(b.AuthConfigFile.Configs) > 0 {
   440  		// The request came with a full auth config file; we prefer to use that
   441  		repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
   442  		if err != nil {
   443  			return nil, err
   444  		}
   445  		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
   446  		pullRegistryAuth = &resolvedAuth
   447  	}
   448  	job.SetenvBool("json", b.StreamFormatter.Json())
   449  	job.SetenvBool("parallel", true)
   450  	job.SetenvJson("authConfig", pullRegistryAuth)
   451  	job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld))
   452  	if err := job.Run(); err != nil {
   453  		return nil, err
   454  	}
   455  	image, err := b.Daemon.Repositories().LookupImage(name)
   456  	if err != nil {
   457  		return nil, err
   458  	}
   459  
   460  	return image, nil
   461  }
   462  
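        // processImageFrom makes img the base of the build: it adopts the image's
        // config (ensuring a default PATH), then replays any ONBUILD triggers
        // stored on the image, parsing and dispatching each one while rejecting
        // ONBUILD, MAINTAINER and FROM as trigger instructions.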
   463  func (b *Builder) processImageFrom(img *imagepkg.Image) error {
   464  	b.image = img.ID
   465  
   466  	if img.Config != nil {
   467  		b.Config = img.Config
   468  	}
   469  
   470  	if len(b.Config.Env) == 0 {
   471  		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
   472  	}
   473  
   474  	// Process ONBUILD triggers if they exist
   475  	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
   476  		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
   477  	}
   478  
   479  	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
   480  	onBuildTriggers := b.Config.OnBuild
   481  	b.Config.OnBuild = []string{}
   482  
   483  	// parse the ONBUILD triggers by invoking the parser
   484  	for stepN, step := range onBuildTriggers {
   485  		ast, err := parser.Parse(strings.NewReader(step))
   486  		if err != nil {
   487  			return err
   488  		}
   489  
   490  		for i, n := range ast.Children {
   491  			switch strings.ToUpper(n.Value) {
   492  			case "ONBUILD":
   493  				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
   494  			case "MAINTAINER", "FROM":
   495  				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
   496  			}
   497  
   498  			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)
   499  
   500  			if err := b.dispatch(i, n); err != nil {
   501  				return err
   502  			}
   503  		}
   504  	}
   505  
   506  	return nil
   507  }
   508  
   509  // probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
   510  // and if so attempts to look up the current `b.image` and `b.Config` pair
   511  // in the current server `b.Daemon`. If an image is found, probeCache returns
   512  // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
   513  // is any error, it returns `(false, err)`.
   514  func (b *Builder) probeCache() (bool, error) {
   515  	if !b.UtilizeCache || b.cacheBusted {
   516  		return false, nil
   517  	}
   518  
   519  	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
   520  	if err != nil {
   521  		return false, err
   522  	}
   523  	if cache == nil {
   524  		log.Debugf("[BUILDER] Cache miss")
   525  		b.cacheBusted = true
   526  		return false, nil
   527  	}
   528  
   529  	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
   530  	log.Debugf("[BUILDER] Use cached version")
   531  	b.image = cache.ID
   532  	return true, nil
   533  }
   534  
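        // create builds a new temporary container from the current b.Config and the
        // builder's resource limits, registers it in b.TmpContainers, and primes its
        // Path/Args from the configured Cmd so that run can start it.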
   535  func (b *Builder) create() (*daemon.Container, error) {
   536  	if b.image == "" && !b.noBaseImage {
   537  		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
   538  	}
   539  	b.Config.Image = b.image
   540  
   541  	hostConfig := &runconfig.HostConfig{
   542  		CpuShares:  b.cpuShares,
   543  		CpusetCpus: b.cpuSetCpus,
   544  		Memory:     b.memory,
   545  		MemorySwap: b.memorySwap,
   546  	}
   547  
   548  	config := *b.Config
   549  
   550  	// Create the container
   551  	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
   552  	if err != nil {
   553  		return nil, err
   554  	}
   555  	for _, warning := range warnings {
   556  		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
   557  	}
   558  
   559  	b.TmpContainers[c.ID] = struct{}{}
   560  	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID))
   561  
   562  	if len(config.Cmd) > 0 {
   563  		// override the entry point that may have been picked up from the base image
   564  		c.Path = config.Cmd[0]
   565  		c.Args = config.Cmd[1:]
   566  	} else {
   567  		config.Cmd = []string{}
   568  	}
   569  
   570  	return c, nil
   571  }
   572  
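        // run starts container c, streaming its output when the build is verbose,
        // kills it if the build is cancelled, and returns a JSONError if the command
        // exits with a non-zero status.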
   573  func (b *Builder) run(c *daemon.Container) error {
   574  	var errCh chan error
   575  	if b.Verbose {
   576  		errCh = b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, b.OutStream, b.ErrStream)
   577  	}
   578  
   579  	// Start the container
   580  	if err := c.Start(); err != nil {
   581  		return err
   582  	}
   583  
   584  	finished := make(chan struct{})
   585  	defer close(finished)
   586  	go func() {
   587  		select {
   588  		case <-b.cancelled:
   589  			log.Debugln("Build cancelled, killing container:", c.ID)
   590  			c.Kill()
   591  		case <-finished:
   592  		}
   593  	}()
   594  
   595  	if b.Verbose {
   596  		// Block on reading output from container, stop on err or chan closed
   597  		if err := <-errCh; err != nil {
   598  			return err
   599  		}
   600  	}
   601  
   602  	// Wait for it to finish
   603  	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
   604  		err := &utils.JSONError{
   605  			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
   606  			Code:    ret,
   607  		}
   608  		return err
   609  	}
   610  
   611  	return nil
   612  }
   613  
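        // checkPathForAddition resolves orig (following symlinks) relative to the
        // build context and rejects paths that do not exist or that escape the
        // context directory.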
   614  func (b *Builder) checkPathForAddition(orig string) error {
   615  	origPath := path.Join(b.contextPath, orig)
   616  	origPath, err := filepath.EvalSymlinks(origPath)
   617  	if err != nil {
   618  		if os.IsNotExist(err) {
   619  			return fmt.Errorf("%s: no such file or directory", orig)
   620  		}
   621  		return err
   622  	}
   623  	if !strings.HasPrefix(origPath, b.contextPath) {
   624  		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
   625  	}
   626  	if _, err := os.Stat(origPath); err != nil {
   627  		if os.IsNotExist(err) {
   628  			return fmt.Errorf("%s: no such file or directory", orig)
   629  		}
   630  		return err
   631  	}
   632  	return nil
   633  }
   634  
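        // addContext copies orig from the build context into the container's root
        // filesystem at dest. Directories are copied recursively, archives are
        // unpacked in place when decompress is true, and anything else is copied as
        // a plain file; ownership of whatever was created is then reset to root.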
   635  func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
   636  	var (
   637  		err        error
   638  		destExists = true
   639  		origPath   = path.Join(b.contextPath, orig)
   640  		destPath   = path.Join(container.RootfsPath(), dest)
   641  	)
   642  
   643  	if destPath != container.RootfsPath() {
   644  		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
   645  		if err != nil {
   646  			return err
   647  		}
   648  	}
   649  
   650  	// Preserve the trailing '/'
   651  	if strings.HasSuffix(dest, "/") || dest == "." {
   652  		destPath = destPath + "/"
   653  	}
   654  
   655  	destStat, err := os.Stat(destPath)
   656  	if err != nil {
   657  		if !os.IsNotExist(err) {
   658  			return err
   659  		}
   660  		destExists = false
   661  	}
   662  
   663  	fi, err := os.Stat(origPath)
   664  	if err != nil {
   665  		if os.IsNotExist(err) {
   666  			return fmt.Errorf("%s: no such file or directory", orig)
   667  		}
   668  		return err
   669  	}
   670  
   671  	if fi.IsDir() {
   672  		return copyAsDirectory(origPath, destPath, destExists)
   673  	}
   674  
   675  	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
   676  	if decompress {
   677  		// First try to unpack the source as an archive
   678  		// To support the untar feature we need to clean up the path a little bit,
   679  		// because tar is very forgiving.  First we need to strip off the archive's
   680  		// filename from the path, but the filename is only there when the path does not end in /.
   681  		tarDest := destPath
   682  		if strings.HasSuffix(tarDest, "/") {
   683  			tarDest = filepath.Dir(destPath)
   684  		}
   685  
   686  		// Try to untar origPath into tarDest
   687  		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
   688  			return nil
   689  		} else if err != io.EOF {
   690  			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
   691  		}
   692  	}
   693  
   694  	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
   695  		return err
   696  	}
   697  	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
   698  		return err
   699  	}
   700  
   701  	resPath := destPath
   702  	if destExists && destStat.IsDir() {
   703  		resPath = path.Join(destPath, path.Base(origPath))
   704  	}
   705  
   706  	return fixPermissions(origPath, resPath, 0, 0, destExists)
   707  }
   708  
   709  func copyAsDirectory(source, destination string, destExisted bool) error {
   710  	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
   711  		return err
   712  	}
   713  	return fixPermissions(source, destination, 0, 0, destExisted)
   714  }
   715  
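        // fixPermissions chowns everything that was just copied from source into
        // destination to uid:gid. It walks the source tree rather than the
        // destination so that files which already existed at the destination are
        // left untouched.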
   716  func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
   717  	// If the destination didn't already exist, or the destination isn't a
   718  	// directory, then we should Lchown the destination. Otherwise, we shouldn't
   719  	// Lchown the destination.
   720  	destStat, err := os.Stat(destination)
   721  	if err != nil {
   722  		// This should *never* be reached, because the destination must've already
   723  		// been created while untar-ing the context.
   724  		return err
   725  	}
   726  	doChownDestination := !destExisted || !destStat.IsDir()
   727  
   728  	// We Walk on the source rather than on the destination because we don't
   729  	// want to change permissions on things we haven't created or modified.
   730  	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
   731  		// Do not alter the walk root if it existed before, as it doesn't fall under
   732  		// the domain of "things we should chown".
   733  		if !doChownDestination && (source == fullpath) {
   734  			return nil
   735  		}
   736  
   737  		// Path is prefixed by source: substitute with destination instead.
   738  		cleaned, err := filepath.Rel(source, fullpath)
   739  		if err != nil {
   740  			return err
   741  		}
   742  
   743  		fullpath = path.Join(destination, cleaned)
   744  		return os.Lchown(fullpath, uid, gid)
   745  	})
   746  }
   747  
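        // clearTmp removes the intermediate containers (and their volumes) created
        // during the build, reporting progress on the builder's output stream.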
   748  func (b *Builder) clearTmp() {
   749  	for c := range b.TmpContainers {
   750  		tmp, err := b.Daemon.Get(c)
   751  		if err != nil {
   752  			fmt.Fprint(b.OutStream, err.Error())
   753  			continue
        		}
   754  
   755  		if err := b.Daemon.Rm(tmp); err != nil {
   756  			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", common.TruncateID(c), err)
   757  			return
   758  		}
   759  		b.Daemon.DeleteVolumes(tmp.VolumePaths())
   760  		delete(b.TmpContainers, c)
   761  		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c))
   762  	}
   763  }