github.com/mheon/docker@v0.11.2-0.20150922122814-44f47903a831/builder/internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/stringutils"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
)

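// readContext decompresses the build context stream, wraps it in a tarsum so
// that per-file hashes are available for cache lookups, and unpacks it into a
// temporary directory that becomes b.contextPath.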
func (b *builder) readContext(context io.Reader) (err error) {
	tmpdirPath, err := getTempDir("", "docker-build")
	if err != nil {
		return
	}

	// Make sure we clean up on error. In the happy case the caller is
	// expected to manage the clean-up.
	defer func() {
		if err != nil {
			if e := os.RemoveAll(tmpdirPath); e != nil {
				logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
			}
		}
	}()

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return
	}

	if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return
	}

	b.contextPath = tmpdirPath
	return
}

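// commit records the current b.Config as a new image layer. When id is empty,
// a no-op container is created (reusing the build cache when possible) so the
// configuration change itself can be committed; otherwise the given container
// is committed. The resulting image ID becomes the new b.image.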
func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		if runtime.GOOS != "windows" {
			b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}
	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	image, err := b.Daemon.Commit(container, commitCfg)
	if err != nil {
		return err
	}
	b.Daemon.Graph().Retain(b.id, image.ID)
	b.activeImages = append(b.activeImages, image.ID)
	b.image = image.ID
	return nil
}

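// copyInfo describes a single ADD/COPY source: where it came from, where it
// goes, the hash used for cache lookups, whether it should be decompressed,
// and any temporary directory that must be cleaned up afterwards.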
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}

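// runContextCommand implements ADD and COPY. It resolves every source
// (including remote URLs and wildcards) into copyInfo entries, computes a
// combined cache key, and, on a cache miss, copies the sources into a
// temporary container which is then committed.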
func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files.
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compatibility, if there's just one copyInfo then use its
	// hash as the cache look-up string; otherwise hash them all into one.
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashes []string
		var origs []string
		for _, ci := range copyInfos {
			hashes = append(hashes, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashes, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	if runtime.GOOS != "windows" {
		b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}

	if hit {
		return nil
	}

	container, _, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}

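// calcCopyInfo resolves a single ADD/COPY source into one or more copyInfo
// entries appended to cInfos. Remote URLs are downloaded into a temporary
// directory inside the context, wildcards are expanded against the context
// tarsum, and for local files and directories a content hash is computed so
// the result can be used for build-cache lookups.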
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Adjust destPath when it's a relative path - meaning, make it
	// relative to the WORKDIR.
	if !system.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and generate its hash
	if urlutil.IsURL(passedInOrigPath) {

		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := getTempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present.
		// Otherwise just remove atime and mtime.
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero',
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		// Windows does not support UtimesNano.
		if runtime.GOOS != "windows" {
			if err := system.UtimesNano(tmpFileName, times); err != nil {
				return err
			}
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := filepath.FromSlash(u.Path) // Ensure in platform semantics
			if strings.HasSuffix(path, string(os.PathSeparator)) {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, string(os.PathSeparator))
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match the first file in the sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}

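// containsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ? or [), i.e. whether it needs wildcard expansion.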
func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

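// pullImage pulls the named image (defaulting to the "latest" tag), using any
// auth configuration supplied with the build request, and returns the
// resulting image.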
func (b *builder) pullImage(name string) (*image.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(b.AuthConfigs) > 0 {
		// The request came with a full auth config file; we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}

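// processImageFrom records img as the base image, adopts its config, seeds a
// default PATH when needed, and then runs any ONBUILD triggers stored on the
// image, clearing them from the new configuration first.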
func (b *builder) processImageFrom(img *image.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.ErrStream, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// Parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	b.Daemon.Graph().Retain(b.id, cache.ID)
	b.activeImages = append(b.activeImages, cache.ID)
	return true, nil
}

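// create builds a container from the current b.Config and the builder's
// resource limits (CPU, memory, ulimits, cgroup parent), registers it as a
// temporary container, and returns it ready to be started by run.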
func (b *builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CPUShares:    b.cpuShares,
		CPUPeriod:    b.cpuPeriod,
		CPUQuota:     b.cpuQuota,
		CpusetCpus:   b.cpuSetCpus,
		CpusetMems:   b.cpuSetMems,
		CgroupParent: b.cgroupParent,
		Memory:       b.memory,
		MemorySwap:   b.memorySwap,
		Ulimits:      b.ulimits,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = stringutils.NewStrSlice()
	}

	return c, nil
}

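// run starts the given container, streams its output when the build is
// verbose, kills it if the build is cancelled, and returns a JSONError when
// the command exits with a non-zero status.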
func (b *builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// Start the container
	if err := c.Start(); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill()
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}

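// checkPathForAddition verifies that orig exists inside the build context and
// that it does not escape the context via symlinks or relative components.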
func (b *builder) checkPathForAddition(orig string) error {
	origPath := filepath.Join(b.contextPath, orig)
	origPath, err := symlink.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	contextPath, err := symlink.EvalSymlinks(b.contextPath)
	if err != nil {
		return err
	}
	if !strings.HasPrefix(origPath, contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}

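// addContext copies orig from the build context into the container at dest.
// Directories are copied recursively, archives are unpacked when decompress
// is set, and ownership and permissions of the result are fixed up afterwards.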
func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = filepath.Join(b.contextPath, orig)
		destPath   string
	)

	// Work in daemon-local OS specific file paths
	dest = filepath.FromSlash(dest)

	destPath, err = container.GetResourcePath(dest)
	if err != nil {
		return err
	}

	// Preserve the trailing slash
	if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
		destPath = destPath + string(os.PathSeparator)
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive.
		// To support the untar feature we need to clean up the path a little bit,
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path, but this is only added if the path does not end in a slash.
		tarDest := destPath
		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = filepath.Join(destPath, filepath.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}

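// copyAsDirectory recursively copies a directory tree into the container and
// then fixes up ownership of the copied files.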
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}

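// clearTmp force-removes the intermediate containers created during the
// build, reporting progress on the build output stream and stopping at the
// first removal error.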
func (b *builder) clearTmp() {
	for c := range b.TmpContainers {
		rmConfig := &daemon.ContainerRmConfig{
			ForceRemove:  true,
			RemoveVolume: true,
		}
		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}