package dockerfile

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/runconfig/opts"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/strslice"
)

// commit commits container id as a new image and advances b.image to the
// resulting image ID. autoCmd is stored as the committed image's Cmd and
// comment becomes part of the synthetic "#(nop)" command recorded for it.
// When id is empty, commit first probes the build cache (returning early on
// a hit) and otherwise creates a throwaway container solely to be committed.
// It is a no-op when b.disableCommit is set.
func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.runConfig.Image = b.image

	if id == "" {
		// Temporarily replace Cmd with a "#(nop)" marker so the cache
		// probe keys on this metadata-only step; the defer restores the
		// caller's Cmd on every return path.
		cmd := b.runConfig.Cmd
		b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment))
		defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		} else if hit {
			// Cache hit: probeCache already updated b.image.
			return nil
		}
		id, err = b.create()
		if err != nil {
			return err
		}
	}

	// Note: Actually copy the struct so the committed config's Cmd can be
	// overridden without mutating b.runConfig.
	autoConfig := *b.runConfig
	autoConfig.Cmd = autoCmd

	commitCfg := &backend.ContainerCommitConfig{
		ContainerCommitConfig: types.ContainerCommitConfig{
			Author: b.maintainer,
			Pause:  true,
			Config: &autoConfig,
		},
	}

	// Commit the container
	imageID, err := b.docker.Commit(id, commitCfg)
	if err != nil {
		return err
	}

	b.image = imageID
	return nil
}

// copyInfo pairs a source file's metadata with whether it should be
// decompressed (archive extraction) when copied into the container.
type copyInfo struct {
	builder.FileInfo
	decompress bool
}

// runContextCommand implements the shared machinery behind ADD and COPY:
// it resolves every source argument (context paths, wildcards, and — when
// allowRemote is true — URLs) into copyInfos, probes the build cache using a
// hash of the sources, and on a miss creates a container, copies the sources
// to dest inside it, and commits the result. The last element of args is
// always the destination.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			// NOTE(review): this defer is inside the loop, so one
			// cleanup accumulates per URL and all run only when the
			// function returns — acceptable for the small number of
			// sources a single ADD has, but worth knowing.
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			// Remote sources are never auto-extracted.
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		// Combine the per-source hashes into a single stable cache key.
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Swap in the "#(nop)" cache-probe Cmd; the defer restores the real one.
	cmd := b.runConfig.Cmd
	b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest)))
	defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}, true)
	if err != nil {
		return err
	}
	// Track the container so clearTmp can remove it after the build.
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil {
		return err
	}

	for _, info := range infos {
		if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}

// download fetches srcURL into a freshly created temp directory, streams
// progress to b.Stdout, stamps the file's mtime from the Last-Modified
// header when present (zero time otherwise), and returns a HashedFileInfo
// whose hash is the tarsum of the downloaded file. On success the caller
// owns the temp directory (see the RemoveAll defer in runContextCommand);
// on error the temp directory is cleaned up here via the named-return defer.
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	// Remove the temp dir on any error path; `err` is the named return.
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	// NOTE(review): this assertion panics if b.Stdout is not a
	// *streamformatter.StdoutFormatter — presumably guaranteed by how the
	// Builder is constructed; confirm before reusing this elsewhere.
	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)
	// Stat before Close so we keep the file's metadata for the return value.
	// NOTE(review): if Stat fails, tmpFile is returned without being closed
	// (the dir is still removed by the defer above) — minor fd leak on this
	// error path.
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	// Drain the tar stream; tarsum computes the hash as a side effect.
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}

// calcCopyInfo resolves one ADD/COPY source path within the build context
// into copyInfos. Wildcard patterns (when allowWildcards is true) are
// expanded by walking the context and recursing on each match with
// wildcards disabled. For a plain file the hash is prefixed "file:"; for a
// directory the hashes of all contained files are sorted and combined into
// a single "dir:" hash so the cache key is order-independent.
func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) {

	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)

	// Normalize away a leading separator or "./" so paths are
	// context-relative.
	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		var copyInfos []copyInfo
		if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.Name() == "" {
				// Why are we doing this check?
				return nil
			}
			if match, _ := filepath.Match(origPath, path); !match {
				return nil
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false)
			if err != nil {
				return err
			}
			copyInfos = append(copyInfos, subInfos...)
			return nil
		}); err != nil {
			return nil, err
		}
		return copyInfos, nil
	}

	// Must be a dir or a file

	statPath, fi, err := b.context.Stat(origPath)
	if err != nil {
		return nil, err
	}

	copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}}

	hfi, handleHash := fi.(builder.Hashed)
	if !handleHash {
		return copyInfos, nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		hfi.SetHash("file:" + hfi.Hash())
		return copyInfos, nil
	}
	// Must be a dir
	var subfiles []string
	err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// we already checked handleHash above
		subfiles = append(subfiles, info.(builder.Hashed).Hash())
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Sort so the combined directory hash does not depend on walk order.
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil)))

	return copyInfos, nil
}

// containsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ?, or [). A backslash escapes the following character.
func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

// processImageFrom initializes builder state from the FROM image: it adopts
// the image's ID and run config, ensures PATH has a default value when the
// platform defines one, and then parses and dispatches any ONBUILD triggers
// recorded in the image (clearing them so they are not re-committed).
// A nil img corresponds to "FROM scratch".
func (b *Builder) processImageFrom(img builder.Image) error {
	if img != nil {
		b.image = img.ImageID()

		if img.RunConfig() != nil {
			b.runConfig = img.RunConfig()
		}
	}

	// Check to see if we have a default PATH, note that windows won't
	// have one as its set by HCS
	if system.DefaultPathEnv != "" {
		// Convert the slice of strings that represent the current list
		// of env vars into a map so we can see if PATH is already set.
		// If its not set then go ahead and give it our default value
		configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env)
		if _, ok := configEnv["PATH"]; !ok {
			b.runConfig.Env = append(b.runConfig.Env,
				"PATH="+system.DefaultPathEnv)
		}
	}

	if img == nil {
		// Typically this means they used "FROM scratch"
		return nil
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.runConfig.OnBuild
	b.runConfig.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step), &b.directive)
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			// ONBUILD may not chain itself, and MAINTAINER/FROM make no
			// sense as triggers.
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks if `b.docker` implements builder.ImageCache and image-caching
// is enabled (`b.UseCache`).
// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`.
// If an image is found, probeCache returns `(true, nil)`.
// If no image is found, it returns `(false, nil)`.
// If there is any error, it returns `(false, err)`.
// A single miss sets b.cacheBusted, which disables caching for the rest of
// the build.
func (b *Builder) probeCache() (bool, error) {
	c, ok := b.docker.(builder.ImageCache)
	if !ok || b.options.NoCache || b.cacheBusted {
		return false, nil
	}
	cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig)
	if err != nil {
		return false, err
	}
	if len(cache) == 0 {
		logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd)
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.Stdout, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd)
	b.image = string(cache)

	return true, nil
}

// create makes a new container from b.runConfig (with the build's resource
// limits applied), registers it in b.tmpContainers for later cleanup, prints
// any creation warnings, and overrides the container's command so the base
// image's entrypoint cannot interfere. It returns the new container's ID.
func (b *Builder) create() (string, error) {
	if b.image == "" && !b.noBaseImage {
		return "", fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.runConfig.Image = b.image

	resources := container.Resources{
		CgroupParent: b.options.CgroupParent,
		CPUShares:    b.options.CPUShares,
		CPUPeriod:    b.options.CPUPeriod,
		CPUQuota:     b.options.CPUQuota,
		CpusetCpus:   b.options.CPUSetCPUs,
		CpusetMems:   b.options.CPUSetMems,
		Memory:       b.options.Memory,
		MemorySwap:   b.options.MemorySwap,
		Ulimits:      b.options.Ulimits,
	}

	// TODO: why not embed a hostconfig in builder?
	hostConfig := &container.HostConfig{
		Isolation: b.options.Isolation,
		ShmSize:   b.options.ShmSize,
		Resources: resources,
	}

	// Snapshot the config so config.Cmd below is the value at creation time.
	config := *b.runConfig

	// Create the container
	c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config:     b.runConfig,
		HostConfig: hostConfig,
	}, true)
	if err != nil {
		return "", err
	}
	for _, warning := range c.Warnings {
		fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
	}

	b.tmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	// override the entry point that may have been picked up from the base image
	if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil {
		return "", err
	}

	return c.ID, nil
}

// errCancelled is returned by run when the build's client context is
// cancelled while a container is executing.
var errCancelled = errors.New("build cancelled")

// run starts container cID, streams its output to b.Stdout/b.Stderr, and
// waits for it to exit. If the client context is cancelled first, the
// container is killed and removed and errCancelled is returned. A non-zero
// exit status is reported as a jsonmessage.JSONError.
func (b *Builder) run(cID string) (err error) {
	// Attach in the background; errCh delivers the attach result.
	errCh := make(chan error)
	go func() {
		errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true)
	}()

	// The watcher goroutine races build cancellation against normal
	// completion (signalled by closing `finished`); sync.Once guards the
	// close since both the defer and the success path call finish.
	finished := make(chan struct{})
	var once sync.Once
	finish := func() { close(finished) }
	cancelErrCh := make(chan error, 1)
	defer once.Do(finish)
	go func() {
		select {
		case <-b.clientCtx.Done():
			logrus.Debugln("Build cancelled, killing and removing container:", cID)
			b.docker.ContainerKill(cID, 0)
			b.removeContainer(cID)
			cancelErrCh <- errCancelled
		case <-finished:
			cancelErrCh <- nil
		}
	}()

	if err := b.docker.ContainerStart(cID, nil, true); err != nil {
		return err
	}

	// Block on reading output from container, stop on err or chan closed
	if err := <-errCh; err != nil {
		return err
	}

	if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 {
		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret),
			Code:    ret,
		}
	}
	once.Do(finish)
	return <-cancelErrCh
}

// removeContainer force-removes intermediate container c (and its volumes),
// logging but also returning any error.
func (b *Builder) removeContainer(c string) error {
	rmConfig := &types.ContainerRmConfig{
		ForceRemove:  true,
		RemoveVolume: true,
	}
	if err := b.docker.ContainerRm(c, rmConfig); err != nil {
		fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
		return err
	}
	return nil
}

// clearTmp removes all intermediate containers recorded in b.tmpContainers,
// stopping at the first removal failure (remaining entries are kept).
func (b *Builder) clearTmp() {
	for c := range b.tmpContainers {
		if err := b.removeContainer(c); err != nil {
			return
		}
		delete(b.tmpContainers, c)
		fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}

// readDockerfile reads a Dockerfile from the current context.
func (b *Builder) readDockerfile() error {
	// If no -f was specified then look for 'Dockerfile'. If we can't find
	// that then look for 'dockerfile'. If neither are found then default
	// back to 'Dockerfile' and use that in the error message.
	if b.options.Dockerfile == "" {
		b.options.Dockerfile = builder.DefaultDockerfileName
		if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) {
			lowercase := strings.ToLower(b.options.Dockerfile)
			if _, _, err := b.context.Stat(lowercase); err == nil {
				b.options.Dockerfile = lowercase
			}
		}
	}

	err := b.parseDockerfile()

	if err != nil {
		return err
	}

	// After the Dockerfile has been parsed, we need to check the .dockerignore
	// file for either "Dockerfile" or ".dockerignore", and if either are
	// present then erase them from the build context. These files should never
	// have been sent from the client but we did send them to make sure that
	// we had the Dockerfile to actually parse, and then we also need the
	// .dockerignore file to know whether either file should be removed.
	// Note that this assumes the Dockerfile has been read into memory and
	// is now safe to be removed.
	if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
		dockerIgnore.Process([]string{b.options.Dockerfile})
	}
	return nil
}

// parseDockerfile opens b.options.Dockerfile from the build context,
// rejects missing or empty files with descriptive errors, and stores the
// parsed AST in b.dockerfile.
func (b *Builder) parseDockerfile() error {
	f, err := b.context.Open(b.options.Dockerfile)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile)
		}
		return err
	}
	defer f.Close()
	if f, ok := f.(*os.File); ok {
		// The emptiness check only applies when the context hands back a
		// real *os.File we can Stat.
		fi, err := f.Stat()
		if err != nil {
			return fmt.Errorf("Unexpected error reading Dockerfile: %v", err)
		}
		if fi.Size() == 0 {
			return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile)
		}
	}
	b.dockerfile, err = parser.Parse(f, &b.directive)
	if err != nil {
		return err
	}

	return nil
}

// determine if build arg is part of built-in args or user
// defined args in Dockerfile at any point in time.
func (b *Builder) isBuildArgAllowed(arg string) bool {
	if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
		return true
	}
	if _, ok := b.allowedBuildArgs[arg]; ok {
		return true
	}
	return false
}