github.com/slava-ustovytski/docker@v1.8.2-rc1/builder/internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
)

func (b *builder) readContext(context io.Reader) (err error) {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return
	}

	// Make sure we clean up upon error. In the happy case the caller
	// is expected to manage the clean-up.
	defer func() {
		if err != nil {
			if e := os.RemoveAll(tmpdirPath); e != nil {
				logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
			}
		}
	}()

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return
	}

	if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return
	}

	b.contextPath = tmpdirPath
	return
}

func (b *builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		if runtime.GOOS != "windows" {
			b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}
	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	image, err := b.Daemon.Commit(container, commitCfg)
	if err != nil {
		return err
	}
	b.Daemon.Graph().Retain(b.id, image.ID)
	b.activeImages = append(b.activeImages, image.ID)
	b.image = image.ID
	return nil
}

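// copyInfo captures everything needed to copy one source into the image:
// the path inside the build context (or inside a temp dir for downloads),
// the destination inside the container, the hash used for build-cache
// look-ups, whether the source may be decompressed on copy, and an optional
// temp directory to remove once the copy is done.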
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}

func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	if runtime.GOOS != "windows" {
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}

	if hit {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if err := container.PrepareStorage(); err != nil {
		return err
	}

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := container.CleanupStorage(); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}

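// calcCopyInfo resolves one source argument of an ADD/COPY instruction into
// one or more copyInfo entries appended to cInfos. Remote URLs are downloaded
// into a temp dir inside the build context, wildcards are expanded against
// the context's tarsum entries, and directories get a combined hash of every
// file underneath them so the result can be used for cache look-ups.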
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(passedInOrigPath) {

		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present.
		// Otherwise just remove atime and mtime.
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero',
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
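
		// ci.origPath now points at the downloaded file relative to the
		// build context (the temp dir above was created inside
		// b.contextPath), so the later addContext call can treat it like
		// any other context file.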

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, string(os.PathSeparator)) {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, string(os.PathSeparator))
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
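
	// For the cache key of a directory, collect the tarsum of every context
	// file that lives under it, then sort the sums and hash them together
	// below into a single "dir:<sha256>" value.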

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}

func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

func (b *builder) pullImage(name string) (*image.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(b.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}

func (b *builder) processImageFrom(img *image.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
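// On a hit, probeCache also sets `b.image` to the cached image ID, retains
// that image in the graph and records it in `b.activeImages` so it remains
// available for the rest of the build.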
func (b *builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	b.Daemon.Graph().Retain(b.id, cache.ID)
	b.activeImages = append(b.activeImages, cache.ID)
	return true, nil
}

func (b *builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CpuShares:    b.cpuShares,
		CpuPeriod:    b.cpuPeriod,
		CpuQuota:     b.cpuQuota,
		CpusetCpus:   b.cpuSetCpus,
		CpusetMems:   b.cpuSetMems,
		CgroupParent: b.cgroupParent,
		Memory:       b.memory,
		MemorySwap:   b.memorySwap,
		Ulimits:      b.ulimits,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = runconfig.NewCommand()
	}

	return c, nil
}

func (b *builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill()
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}

func (b *builder) checkPathForAddition(orig string) error {
	origPath := filepath.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	contextPath, err := filepath.EvalSymlinks(b.contextPath)
	if err != nil {
		return err
	}
	if !strings.HasPrefix(origPath, contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}

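// addContext copies orig (a path inside the build context) to dest inside
// the given container. Directories are copied recursively; regular files may
// first be unpacked as archives when decompress is true, and fixPermissions
// is called afterwards to normalize ownership (uid/gid 0).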
func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = filepath.Join(b.contextPath, orig)
		destPath   string
	)

	// Work in daemon-local OS specific file paths
	dest = filepath.FromSlash(dest)

	destPath, err = container.GetResourcePath(dest)
	if err != nil {
		return err
	}

	// Preserve the trailing slash
	if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
		destPath = destPath + string(os.PathSeparator)
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive.
		// To support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in slash
		tarDest := destPath
		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = filepath.Join(destPath, filepath.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}

func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}

func (b *builder) clearTmp() {
	for c := range b.TmpContainers {
		rmConfig := &daemon.ContainerRmConfig{
			ForceRemove:  true,
			RemoveVolume: true,
		}
		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}