github.com/secure-build/gitlab-runner@v12.5.0+incompatible/executors/docker/executor_docker.go (about) 1 package docker 2 3 import ( 4 "bytes" 5 "context" 6 "errors" 7 "fmt" 8 "io" 9 "os" 10 "path" 11 "path/filepath" 12 "regexp" 13 "strconv" 14 "strings" 15 "sync" 16 "time" 17 18 "github.com/docker/distribution/reference" 19 "github.com/docker/docker/api/types" 20 "github.com/docker/docker/api/types/container" 21 "github.com/docker/docker/pkg/stdcopy" 22 "github.com/kardianos/osext" 23 "github.com/mattn/go-zglob" 24 "github.com/sirupsen/logrus" 25 26 "gitlab.com/gitlab-org/gitlab-runner/common" 27 "gitlab.com/gitlab-org/gitlab-runner/executors" 28 "gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes" 29 "gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser" 30 "gitlab.com/gitlab-org/gitlab-runner/helpers" 31 docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker" 32 "gitlab.com/gitlab-org/gitlab-runner/helpers/docker/helperimage" 33 "gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags" 34 ) 35 36 const ( 37 DockerExecutorStagePrepare common.ExecutorStage = "docker_prepare" 38 DockerExecutorStageRun common.ExecutorStage = "docker_run" 39 DockerExecutorStageCleanup common.ExecutorStage = "docker_cleanup" 40 41 DockerExecutorStageCreatingBuildVolumes common.ExecutorStage = "docker_creating_build_volumes" 42 DockerExecutorStageCreatingServices common.ExecutorStage = "docker_creating_services" 43 DockerExecutorStageCreatingUserVolumes common.ExecutorStage = "docker_creating_user_volumes" 44 DockerExecutorStagePullingImage common.ExecutorStage = "docker_pulling_image" 45 ) 46 47 const ( 48 AuthConfigSourceNameUserVariable = "$DOCKER_AUTH_CONFIG" 49 AuthConfigSourceNameJobPayload = "job payload (GitLab Registry)" 50 ) 51 52 var DockerPrebuiltImagesPaths []string 53 54 var neverRestartPolicy = container.RestartPolicy{Name: "no"} 55 56 var errVolumesManagerUndefined = errors.New("volumesManager is undefined") 57 58 type executor struct { 59 executors.AbstractExecutor 60 client docker_helpers.Client 61 volumeParser parser.Parser 62 info types.Info 63 64 temporary []string // IDs of containers that should be removed 65 66 builds []string // IDs of successfully created build containers 67 services []*types.Container 68 69 links []string 70 71 devices []container.DeviceMapping 72 73 helperImageInfo helperimage.Info 74 75 usedImages map[string]string 76 usedImagesLock sync.RWMutex 77 78 volumesManager volumes.Manager 79 } 80 81 func init() { 82 runnerFolder, err := osext.ExecutableFolder() 83 if err != nil { 84 logrus.Errorln("Docker executor: unable to detect gitlab-runner folder, prebuilt image helpers will be loaded from DockerHub.", err) 85 } 86 87 DockerPrebuiltImagesPaths = []string{ 88 filepath.Join(runnerFolder, "helper-images"), 89 filepath.Join(runnerFolder, "out/helper-images"), 90 } 91 } 92 93 func (e *executor) getServiceVariables() []string { 94 return e.Build.GetAllVariables().PublicOrInternal().StringList() 95 } 96 97 func (e *executor) getUserAuthConfiguration(indexName string) (string, *types.AuthConfig) { 98 if e.Build == nil { 99 return "", nil 100 } 101 102 buf := bytes.NewBufferString(e.Build.GetDockerAuthConfig()) 103 authConfigs, _ := docker_helpers.ReadAuthConfigsFromReader(buf) 104 105 if authConfigs == nil { 106 return "", nil 107 } 108 109 return AuthConfigSourceNameUserVariable, docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs) 110 } 111 112 func (e *executor) getBuildAuthConfiguration(indexName string) (string, *types.AuthConfig) { 113 if e.Build == nil { 114 return "", nil 115 } 116 117 authConfigs := make(map[string]types.AuthConfig) 118 119 for _, credentials := range e.Build.Credentials { 120 if credentials.Type != "registry" { 121 continue 122 } 123 124 authConfigs[credentials.URL] = types.AuthConfig{ 125 Username: credentials.Username, 126 Password: credentials.Password, 127 ServerAddress: credentials.URL, 128 } 129 } 130 131 return AuthConfigSourceNameJobPayload, docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs) 132 } 133 134 func (e *executor) getHomeDirAuthConfiguration(indexName string) (string, *types.AuthConfig) { 135 sourceFile, authConfigs, _ := docker_helpers.ReadDockerAuthConfigsFromHomeDir(e.Shell().User) 136 137 if authConfigs == nil { 138 return "", nil 139 } 140 return sourceFile, docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs) 141 142 } 143 144 type authConfigResolver func(indexName string) (string, *types.AuthConfig) 145 146 func (e *executor) getAuthConfig(imageName string) *types.AuthConfig { 147 indexName, _ := docker_helpers.SplitDockerImageName(imageName) 148 149 resolvers := []authConfigResolver{ 150 e.getUserAuthConfiguration, 151 e.getHomeDirAuthConfiguration, 152 e.getBuildAuthConfiguration, 153 } 154 155 for _, resolver := range resolvers { 156 source, authConfig := resolver(indexName) 157 158 if authConfig != nil { 159 e.Println("Authenticating with credentials from", source) 160 e.Debugln("Using", authConfig.Username, "to connect to", authConfig.ServerAddress, 161 "in order to resolve", imageName, "...") 162 163 return authConfig 164 } 165 } 166 167 e.Debugln(fmt.Sprintf("No credentials found for %v", indexName)) 168 169 return nil 170 } 171 172 func (e *executor) pullDockerImage(imageName string, ac *types.AuthConfig) (*types.ImageInspect, error) { 173 e.SetCurrentStage(DockerExecutorStagePullingImage) 174 e.Println("Pulling docker image", imageName, "...") 175 176 ref := imageName 177 // Add :latest to limit the download results 178 if !strings.ContainsAny(ref, ":@") { 179 ref += ":latest" 180 } 181 182 options := types.ImagePullOptions{} 183 if ac != nil { 184 options.RegistryAuth, _ = docker_helpers.EncodeAuthConfig(ac) 185 } 186 187 errorRegexp := regexp.MustCompile("(repository does not exist|not found)") 188 if err := e.client.ImagePullBlocking(e.Context, ref, options); err != nil { 189 if errorRegexp.MatchString(err.Error()) { 190 return nil, &common.BuildError{Inner: err} 191 } 192 return nil, err 193 } 194 195 image, _, err := e.client.ImageInspectWithRaw(e.Context, imageName) 196 return &image, err 197 } 198 199 func (e *executor) getDockerImage(imageName string) (image *types.ImageInspect, err error) { 200 pullPolicy, err := e.Config.Docker.PullPolicy.Get() 201 if err != nil { 202 return nil, err 203 } 204 205 authConfig := e.getAuthConfig(imageName) 206 207 e.Debugln("Looking for image", imageName, "...") 208 existingImage, _, err := e.client.ImageInspectWithRaw(e.Context, imageName) 209 210 // Return early if we already used that image 211 if err == nil && e.wasImageUsed(imageName, existingImage.ID) { 212 return &existingImage, nil 213 } 214 215 defer func() { 216 if err == nil { 217 e.markImageAsUsed(imageName, image.ID) 218 } 219 }() 220 221 // If never is specified then we return what inspect did return 222 if pullPolicy == common.PullPolicyNever { 223 return &existingImage, err 224 } 225 226 if err == nil { 227 // Don't pull image that is passed by ID 228 if existingImage.ID == imageName { 229 return &existingImage, nil 230 } 231 232 // If not-present is specified 233 if pullPolicy == common.PullPolicyIfNotPresent { 234 e.Println("Using locally found image version due to if-not-present pull policy") 235 return &existingImage, err 236 } 237 } 238 239 return e.pullDockerImage(imageName, authConfig) 240 } 241 242 func (e *executor) expandAndGetDockerImage(imageName string, allowedImages []string) (*types.ImageInspect, error) { 243 imageName, err := e.expandImageName(imageName, allowedImages) 244 if err != nil { 245 return nil, err 246 } 247 248 image, err := e.getDockerImage(imageName) 249 if err != nil { 250 return nil, err 251 } 252 253 return image, nil 254 } 255 256 func (e *executor) loadPrebuiltImage(path, ref, tag string) (*types.ImageInspect, error) { 257 file, err := os.OpenFile(path, os.O_RDONLY, 0600) 258 if err != nil { 259 if os.IsNotExist(err) { 260 return nil, err 261 } 262 263 return nil, fmt.Errorf("Cannot load prebuilt image: %s: %q", path, err.Error()) 264 } 265 defer file.Close() 266 267 e.Debugln("Loading prebuilt image...") 268 269 source := types.ImageImportSource{ 270 Source: file, 271 SourceName: "-", 272 } 273 options := types.ImageImportOptions{Tag: tag} 274 275 if err := e.client.ImageImportBlocking(e.Context, source, ref, options); err != nil { 276 return nil, fmt.Errorf("Failed to import image: %s", err) 277 } 278 279 image, _, err := e.client.ImageInspectWithRaw(e.Context, ref+":"+tag) 280 if err != nil { 281 e.Debugln("Inspecting imported image", ref, "failed:", err) 282 return nil, err 283 } 284 285 return &image, err 286 } 287 288 func (e *executor) getPrebuiltImage() (*types.ImageInspect, error) { 289 if imageNameFromConfig := e.Config.Docker.HelperImage; imageNameFromConfig != "" { 290 imageNameFromConfig = common.AppVersion.Variables().ExpandValue(imageNameFromConfig) 291 292 e.Debugln("Pull configured helper_image for predefined container instead of import bundled image", imageNameFromConfig, "...") 293 294 return e.getDockerImage(imageNameFromConfig) 295 } 296 297 e.Debugln(fmt.Sprintf("Looking for prebuilt image %s...", e.helperImageInfo)) 298 image, _, err := e.client.ImageInspectWithRaw(e.Context, e.helperImageInfo.String()) 299 if err == nil { 300 return &image, nil 301 } 302 303 // Try to load prebuilt image from local filesystem 304 loadedImage := e.getLocalHelperImage() 305 if loadedImage != nil { 306 return loadedImage, nil 307 } 308 309 // Fallback to getting image from DockerHub 310 e.Debugln(fmt.Sprintf("Loading image form registry: %s", e.helperImageInfo)) 311 return e.getDockerImage(e.helperImageInfo.String()) 312 } 313 314 func (e *executor) getLocalHelperImage() *types.ImageInspect { 315 if !e.helperImageInfo.IsSupportingLocalImport { 316 return nil 317 } 318 319 architecture := e.helperImageInfo.Architecture 320 for _, dockerPrebuiltImagesPath := range DockerPrebuiltImagesPaths { 321 dockerPrebuiltImageFilePath := filepath.Join(dockerPrebuiltImagesPath, "prebuilt-"+architecture+prebuiltImageExtension) 322 image, err := e.loadPrebuiltImage(dockerPrebuiltImageFilePath, prebuiltImageName, e.helperImageInfo.Tag) 323 if err != nil { 324 e.Debugln("Failed to load prebuilt image from:", dockerPrebuiltImageFilePath, "error:", err) 325 continue 326 } 327 328 return image 329 } 330 331 return nil 332 } 333 334 func (e *executor) getBuildImage() (*types.ImageInspect, error) { 335 imageName, err := e.expandImageName(e.Build.Image.Name, []string{}) 336 if err != nil { 337 return nil, err 338 } 339 340 // Fetch image 341 image, err := e.getDockerImage(imageName) 342 if err != nil { 343 return nil, err 344 } 345 346 return image, nil 347 } 348 349 func (e *executor) getLabels(containerType string, otherLabels ...string) map[string]string { 350 labels := make(map[string]string) 351 labels[dockerLabelPrefix+".job.id"] = strconv.Itoa(e.Build.ID) 352 labels[dockerLabelPrefix+".job.sha"] = e.Build.GitInfo.Sha 353 labels[dockerLabelPrefix+".job.before_sha"] = e.Build.GitInfo.BeforeSha 354 labels[dockerLabelPrefix+".job.ref"] = e.Build.GitInfo.Ref 355 labels[dockerLabelPrefix+".project.id"] = strconv.Itoa(e.Build.JobInfo.ProjectID) 356 labels[dockerLabelPrefix+".pipeline.id"] = e.Build.GetAllVariables().Get("CI_PIPELINE_ID") 357 labels[dockerLabelPrefix+".runner.id"] = e.Build.Runner.ShortDescription() 358 labels[dockerLabelPrefix+".runner.local_id"] = strconv.Itoa(e.Build.RunnerID) 359 labels[dockerLabelPrefix+".type"] = containerType 360 for _, label := range otherLabels { 361 keyValue := strings.SplitN(label, "=", 2) 362 if len(keyValue) == 2 { 363 labels[dockerLabelPrefix+"."+keyValue[0]] = keyValue[1] 364 } 365 } 366 return labels 367 } 368 369 func fakeContainer(id string, names ...string) *types.Container { 370 return &types.Container{ID: id, Names: names} 371 } 372 373 func (e *executor) parseDeviceString(deviceString string) (device container.DeviceMapping, err error) { 374 // Split the device string PathOnHost[:PathInContainer[:CgroupPermissions]] 375 parts := strings.Split(deviceString, ":") 376 377 if len(parts) > 3 { 378 err = fmt.Errorf("Too many colons") 379 return 380 } 381 382 device.PathOnHost = parts[0] 383 384 // Optional container path 385 if len(parts) >= 2 { 386 device.PathInContainer = parts[1] 387 } else { 388 // default: device at same path in container 389 device.PathInContainer = device.PathOnHost 390 } 391 392 // Optional permissions 393 if len(parts) >= 3 { 394 device.CgroupPermissions = parts[2] 395 } else { 396 // default: rwm, just like 'docker run' 397 device.CgroupPermissions = "rwm" 398 } 399 400 return 401 } 402 403 func (e *executor) bindDevices() (err error) { 404 for _, deviceString := range e.Config.Docker.Devices { 405 device, err := e.parseDeviceString(deviceString) 406 if err != nil { 407 err = fmt.Errorf("Failed to parse device string %q: %s", deviceString, err) 408 return err 409 } 410 411 e.devices = append(e.devices, device) 412 } 413 return nil 414 } 415 416 func (e *executor) wasImageUsed(imageName, imageID string) bool { 417 e.usedImagesLock.RLock() 418 defer e.usedImagesLock.RUnlock() 419 420 if e.usedImages[imageName] == imageID { 421 return true 422 } 423 return false 424 } 425 426 func (e *executor) markImageAsUsed(imageName, imageID string) { 427 e.usedImagesLock.Lock() 428 defer e.usedImagesLock.Unlock() 429 430 if e.usedImages == nil { 431 e.usedImages = make(map[string]string) 432 } 433 e.usedImages[imageName] = imageID 434 435 if imageName != imageID { 436 e.Println("Using docker image", imageID, "for", imageName, "...") 437 } 438 } 439 440 func (e *executor) splitServiceAndVersion(serviceDescription string) (service, version, imageName string, linkNames []string) { 441 ReferenceRegexpNoPort := regexp.MustCompile(`^(.*?)(|:[0-9]+)(|/.*)$`) 442 imageName = serviceDescription 443 version = "latest" 444 445 if match := reference.ReferenceRegexp.FindStringSubmatch(serviceDescription); match != nil { 446 matchService := ReferenceRegexpNoPort.FindStringSubmatch(match[1]) 447 service = matchService[1] + matchService[3] 448 449 if len(match[2]) > 0 { 450 version = match[2] 451 } else { 452 imageName = match[1] + ":" + version 453 } 454 } else { 455 return 456 } 457 458 linkName := strings.Replace(service, "/", "__", -1) 459 linkNames = append(linkNames, linkName) 460 461 // Create alternative link name according to RFC 1123 462 // Where you can use only `a-zA-Z0-9-` 463 if alternativeName := strings.Replace(service, "/", "-", -1); linkName != alternativeName { 464 linkNames = append(linkNames, alternativeName) 465 } 466 return 467 } 468 469 func (e *executor) createService(serviceIndex int, service, version, image string, serviceDefinition common.Image) (*types.Container, error) { 470 if len(service) == 0 { 471 return nil, fmt.Errorf("invalid service name: %s", serviceDefinition.Name) 472 } 473 474 if e.volumesManager == nil { 475 return nil, errVolumesManagerUndefined 476 } 477 478 e.Println("Starting service", service+":"+version, "...") 479 serviceImage, err := e.getDockerImage(image) 480 if err != nil { 481 return nil, err 482 } 483 484 serviceSlug := strings.Replace(service, "/", "__", -1) 485 containerName := fmt.Sprintf("%s-%s-%d", e.Build.ProjectUniqueName(), serviceSlug, serviceIndex) 486 487 // this will fail potentially some builds if there's name collision 488 e.removeContainer(e.Context, containerName) 489 490 config := &container.Config{ 491 Image: serviceImage.ID, 492 Labels: e.getLabels("service", "service="+service, "service.version="+version), 493 Env: e.getServiceVariables(), 494 } 495 496 if len(serviceDefinition.Command) > 0 { 497 config.Cmd = serviceDefinition.Command 498 } 499 config.Entrypoint = e.overwriteEntrypoint(&serviceDefinition) 500 501 hostConfig := &container.HostConfig{ 502 DNS: e.Config.Docker.DNS, 503 DNSSearch: e.Config.Docker.DNSSearch, 504 RestartPolicy: neverRestartPolicy, 505 ExtraHosts: e.Config.Docker.ExtraHosts, 506 Privileged: e.Config.Docker.Privileged, 507 NetworkMode: container.NetworkMode(e.Config.Docker.NetworkMode), 508 Binds: e.volumesManager.Binds(), 509 ShmSize: e.Config.Docker.ShmSize, 510 VolumesFrom: e.volumesManager.ContainerIDs(), 511 Tmpfs: e.Config.Docker.ServicesTmpfs, 512 LogConfig: container.LogConfig{ 513 Type: "json-file", 514 }, 515 } 516 517 e.Debugln("Creating service container", containerName, "...") 518 resp, err := e.client.ContainerCreate(e.Context, config, hostConfig, nil, containerName) 519 if err != nil { 520 return nil, err 521 } 522 523 e.Debugln("Starting service container", resp.ID, "...") 524 err = e.client.ContainerStart(e.Context, resp.ID, types.ContainerStartOptions{}) 525 if err != nil { 526 e.temporary = append(e.temporary, resp.ID) 527 return nil, err 528 } 529 530 return fakeContainer(resp.ID, containerName), nil 531 } 532 533 func (e *executor) getServicesDefinitions() (common.Services, error) { 534 serviceDefinitions := common.Services{} 535 for _, service := range e.Config.Docker.Services { 536 serviceDefinitions = append(serviceDefinitions, common.Image{Name: service}) 537 } 538 539 for _, service := range e.Build.Services { 540 serviceName := e.Build.GetAllVariables().ExpandValue(service.Name) 541 err := e.verifyAllowedImage(serviceName, "services", e.Config.Docker.AllowedServices, e.Config.Docker.Services) 542 if err != nil { 543 return nil, err 544 } 545 546 service.Name = serviceName 547 serviceDefinitions = append(serviceDefinitions, service) 548 } 549 550 return serviceDefinitions, nil 551 } 552 553 func (e *executor) waitForServices() { 554 waitForServicesTimeout := e.Config.Docker.WaitForServicesTimeout 555 if waitForServicesTimeout == 0 { 556 waitForServicesTimeout = common.DefaultWaitForServicesTimeout 557 } 558 559 // wait for all services to came up 560 if waitForServicesTimeout > 0 && len(e.services) > 0 { 561 e.Println("Waiting for services to be up and running...") 562 wg := sync.WaitGroup{} 563 for _, service := range e.services { 564 wg.Add(1) 565 go func(service *types.Container) { 566 e.waitForServiceContainer(service, time.Duration(waitForServicesTimeout)*time.Second) 567 wg.Done() 568 }(service) 569 } 570 wg.Wait() 571 } 572 } 573 574 func (e *executor) buildServiceLinks(linksMap map[string]*types.Container) (links []string) { 575 for linkName, linkee := range linksMap { 576 newContainer, err := e.client.ContainerInspect(e.Context, linkee.ID) 577 if err != nil { 578 continue 579 } 580 if newContainer.State.Running { 581 links = append(links, linkee.ID+":"+linkName) 582 } 583 } 584 return 585 } 586 587 func (e *executor) createFromServiceDefinition(serviceIndex int, serviceDefinition common.Image, linksMap map[string]*types.Container) (err error) { 588 var container *types.Container 589 590 service, version, imageName, linkNames := e.splitServiceAndVersion(serviceDefinition.Name) 591 592 if serviceDefinition.Alias != "" { 593 linkNames = append(linkNames, serviceDefinition.Alias) 594 } 595 596 for _, linkName := range linkNames { 597 if linksMap[linkName] != nil { 598 e.Warningln("Service", serviceDefinition.Name, "is already created. Ignoring.") 599 continue 600 } 601 602 // Create service if not yet created 603 if container == nil { 604 container, err = e.createService(serviceIndex, service, version, imageName, serviceDefinition) 605 if err != nil { 606 return 607 } 608 e.Debugln("Created service", serviceDefinition.Name, "as", container.ID) 609 e.services = append(e.services, container) 610 e.temporary = append(e.temporary, container.ID) 611 } 612 linksMap[linkName] = container 613 } 614 return 615 } 616 617 func (e *executor) createServices() (err error) { 618 e.SetCurrentStage(DockerExecutorStageCreatingServices) 619 e.Debugln("Creating services...") 620 621 servicesDefinitions, err := e.getServicesDefinitions() 622 if err != nil { 623 return 624 } 625 626 linksMap := make(map[string]*types.Container) 627 628 for index, serviceDefinition := range servicesDefinitions { 629 err = e.createFromServiceDefinition(index, serviceDefinition, linksMap) 630 if err != nil { 631 return 632 } 633 } 634 635 e.waitForServices() 636 637 e.links = e.buildServiceLinks(linksMap) 638 return 639 } 640 641 func (e *executor) getValidContainers(containers []string) []string { 642 var newContainers []string 643 644 for _, container := range containers { 645 if _, err := e.client.ContainerInspect(e.Context, container); err == nil { 646 newContainers = append(newContainers, container) 647 } 648 } 649 650 return newContainers 651 } 652 653 func (e *executor) createContainer(containerType string, imageDefinition common.Image, cmd []string, allowedInternalImages []string) (*types.ContainerJSON, error) { 654 if e.volumesManager == nil { 655 return nil, errVolumesManagerUndefined 656 } 657 658 image, err := e.expandAndGetDockerImage(imageDefinition.Name, allowedInternalImages) 659 if err != nil { 660 return nil, err 661 } 662 663 hostname := e.Config.Docker.Hostname 664 if hostname == "" { 665 hostname = e.Build.ProjectUniqueName() 666 } 667 668 // Always create unique, but sequential name 669 containerIndex := len(e.builds) 670 containerName := e.Build.ProjectUniqueName() + "-" + 671 containerType + "-" + strconv.Itoa(containerIndex) 672 673 config := &container.Config{ 674 Image: image.ID, 675 Hostname: hostname, 676 Cmd: cmd, 677 Labels: e.getLabels(containerType), 678 Tty: false, 679 AttachStdin: true, 680 AttachStdout: true, 681 AttachStderr: true, 682 OpenStdin: true, 683 StdinOnce: true, 684 Env: append(e.Build.GetAllVariables().StringList(), e.BuildShell.Environment...), 685 } 686 687 config.Entrypoint = e.overwriteEntrypoint(&imageDefinition) 688 689 nanoCPUs, err := e.Config.Docker.GetNanoCPUs() 690 if err != nil { 691 return nil, err 692 } 693 694 // By default we use caches container, 695 // but in later phases we hook to previous build container 696 volumesFrom := e.volumesManager.ContainerIDs() 697 if len(e.builds) > 0 { 698 volumesFrom = []string{ 699 e.builds[len(e.builds)-1], 700 } 701 } 702 703 hostConfig := &container.HostConfig{ 704 Resources: container.Resources{ 705 Memory: e.Config.Docker.GetMemory(), 706 MemorySwap: e.Config.Docker.GetMemorySwap(), 707 MemoryReservation: e.Config.Docker.GetMemoryReservation(), 708 CpusetCpus: e.Config.Docker.CPUSetCPUs, 709 NanoCPUs: nanoCPUs, 710 Devices: e.devices, 711 OomKillDisable: e.Config.Docker.GetOomKillDisable(), 712 }, 713 DNS: e.Config.Docker.DNS, 714 DNSSearch: e.Config.Docker.DNSSearch, 715 Runtime: e.Config.Docker.Runtime, 716 Privileged: e.Config.Docker.Privileged, 717 UsernsMode: container.UsernsMode(e.Config.Docker.UsernsMode), 718 CapAdd: e.Config.Docker.CapAdd, 719 CapDrop: e.Config.Docker.CapDrop, 720 SecurityOpt: e.Config.Docker.SecurityOpt, 721 RestartPolicy: neverRestartPolicy, 722 ExtraHosts: e.Config.Docker.ExtraHosts, 723 NetworkMode: container.NetworkMode(e.Config.Docker.NetworkMode), 724 Links: append(e.Config.Docker.Links, e.links...), 725 Binds: e.volumesManager.Binds(), 726 ShmSize: e.Config.Docker.ShmSize, 727 VolumeDriver: e.Config.Docker.VolumeDriver, 728 VolumesFrom: append(e.Config.Docker.VolumesFrom, volumesFrom...), 729 LogConfig: container.LogConfig{ 730 Type: "json-file", 731 }, 732 Tmpfs: e.Config.Docker.Tmpfs, 733 Sysctls: e.Config.Docker.SysCtls, 734 } 735 736 // this will fail potentially some builds if there's name collision 737 e.removeContainer(e.Context, containerName) 738 739 e.Debugln("Creating container", containerName, "...") 740 resp, err := e.client.ContainerCreate(e.Context, config, hostConfig, nil, containerName) 741 if err != nil { 742 if resp.ID != "" { 743 e.temporary = append(e.temporary, resp.ID) 744 } 745 return nil, err 746 } 747 748 inspect, err := e.client.ContainerInspect(e.Context, resp.ID) 749 if err != nil { 750 e.temporary = append(e.temporary, resp.ID) 751 return nil, err 752 } 753 754 e.builds = append(e.builds, resp.ID) 755 e.temporary = append(e.temporary, resp.ID) 756 return &inspect, nil 757 } 758 759 func (e *executor) killContainer(id string, waitCh chan error) (err error) { 760 for { 761 e.disconnectNetwork(e.Context, id) 762 e.Debugln("Killing container", id, "...") 763 e.client.ContainerKill(e.Context, id, "SIGKILL") 764 765 // Wait for signal that container were killed 766 // or retry after some time 767 select { 768 case err = <-waitCh: 769 return 770 771 case <-time.After(time.Second): 772 } 773 } 774 } 775 776 func (e *executor) waitForContainer(ctx context.Context, id string) error { 777 e.Debugln("Waiting for container", id, "...") 778 779 retries := 0 780 781 // Use active wait 782 for ctx.Err() == nil { 783 container, err := e.client.ContainerInspect(ctx, id) 784 if err != nil { 785 if docker_helpers.IsErrNotFound(err) { 786 return err 787 } 788 789 if retries > 3 { 790 return err 791 } 792 793 retries++ 794 time.Sleep(time.Second) 795 continue 796 } 797 798 // Reset retry timer 799 retries = 0 800 801 if container.State.Running { 802 time.Sleep(time.Second) 803 continue 804 } 805 806 if container.State.ExitCode != 0 { 807 return &common.BuildError{ 808 Inner: fmt.Errorf("exit code %d", container.State.ExitCode), 809 } 810 } 811 812 return nil 813 } 814 815 return ctx.Err() 816 } 817 818 func (e *executor) watchContainer(ctx context.Context, id string, input io.Reader) (err error) { 819 options := types.ContainerAttachOptions{ 820 Stream: true, 821 Stdin: true, 822 Stdout: true, 823 Stderr: true, 824 } 825 826 e.Debugln("Attaching to container", id, "...") 827 hijacked, err := e.client.ContainerAttach(ctx, id, options) 828 if err != nil { 829 return 830 } 831 defer hijacked.Close() 832 833 e.Debugln("Starting container", id, "...") 834 err = e.client.ContainerStart(ctx, id, types.ContainerStartOptions{}) 835 if err != nil { 836 return 837 } 838 839 e.Debugln("Waiting for attach to finish", id, "...") 840 attachCh := make(chan error, 2) 841 842 // Copy any output to the build trace 843 go func() { 844 _, err := stdcopy.StdCopy(e.Trace, e.Trace, hijacked.Reader) 845 if err != nil { 846 attachCh <- err 847 } 848 }() 849 850 // Write the input to the container and close its STDIN to get it to finish 851 go func() { 852 _, err := io.Copy(hijacked.Conn, input) 853 hijacked.CloseWrite() 854 if err != nil { 855 attachCh <- err 856 } 857 }() 858 859 waitCh := make(chan error, 1) 860 go func() { 861 waitCh <- e.waitForContainer(e.Context, id) 862 }() 863 864 select { 865 case <-ctx.Done(): 866 e.killContainer(id, waitCh) 867 err = errors.New("Aborted") 868 869 case err = <-attachCh: 870 e.killContainer(id, waitCh) 871 e.Debugln("Container", id, "finished with", err) 872 873 case err = <-waitCh: 874 e.Debugln("Container", id, "finished with", err) 875 } 876 return 877 } 878 879 func (e *executor) removeContainer(ctx context.Context, id string) error { 880 e.disconnectNetwork(ctx, id) 881 options := types.ContainerRemoveOptions{ 882 RemoveVolumes: true, 883 Force: true, 884 } 885 err := e.client.ContainerRemove(ctx, id, options) 886 e.Debugln("Removed container", id, "with", err) 887 return err 888 } 889 890 func (e *executor) disconnectNetwork(ctx context.Context, id string) error { 891 netList, err := e.client.NetworkList(ctx, types.NetworkListOptions{}) 892 if err != nil { 893 e.Debugln("Can't get network list. ListNetworks exited with", err) 894 return err 895 } 896 897 for _, network := range netList { 898 for _, pluggedContainer := range network.Containers { 899 if id == pluggedContainer.Name { 900 err = e.client.NetworkDisconnect(ctx, network.ID, id, true) 901 if err != nil { 902 e.Warningln("Can't disconnect possibly zombie container", pluggedContainer.Name, "from network", network.Name, "->", err) 903 } else { 904 e.Warningln("Possibly zombie container", pluggedContainer.Name, "is disconnected from network", network.Name) 905 } 906 break 907 } 908 } 909 } 910 return err 911 } 912 913 func (e *executor) verifyAllowedImage(image, optionName string, allowedImages []string, internalImages []string) error { 914 for _, allowedImage := range allowedImages { 915 ok, _ := zglob.Match(allowedImage, image) 916 if ok { 917 return nil 918 } 919 } 920 921 for _, internalImage := range internalImages { 922 if internalImage == image { 923 return nil 924 } 925 } 926 927 if len(allowedImages) != 0 { 928 e.Println() 929 e.Errorln("The", image, "is not present on list of allowed", optionName) 930 for _, allowedImage := range allowedImages { 931 e.Println("-", allowedImage) 932 } 933 e.Println() 934 } else { 935 // by default allow to override the image name 936 return nil 937 } 938 939 e.Println("Please check runner's configuration: http://doc.gitlab.com/ci/docker/using_docker_images.html#overwrite-image-and-services") 940 return errors.New("invalid image") 941 } 942 943 func (e *executor) expandImageName(imageName string, allowedInternalImages []string) (string, error) { 944 if imageName != "" { 945 image := e.Build.GetAllVariables().ExpandValue(imageName) 946 allowedInternalImages = append(allowedInternalImages, e.Config.Docker.Image) 947 err := e.verifyAllowedImage(image, "images", e.Config.Docker.AllowedImages, allowedInternalImages) 948 if err != nil { 949 return "", err 950 } 951 return image, nil 952 } 953 954 if e.Config.Docker.Image == "" { 955 return "", errors.New("No Docker image specified to run the build in") 956 } 957 958 return e.Config.Docker.Image, nil 959 } 960 961 func (e *executor) overwriteEntrypoint(image *common.Image) []string { 962 if len(image.Entrypoint) > 0 { 963 if !e.Config.Docker.DisableEntrypointOverwrite { 964 return image.Entrypoint 965 } 966 967 e.Warningln("Entrypoint override disabled") 968 } 969 970 return nil 971 } 972 973 func (e *executor) connectDocker() error { 974 client, err := docker_helpers.New(e.Config.Docker.DockerCredentials, "") 975 if err != nil { 976 return err 977 } 978 e.client = client 979 980 e.info, err = client.Info(e.Context) 981 if err != nil { 982 return err 983 } 984 985 err = e.validateOSType() 986 if err != nil { 987 return err 988 } 989 990 e.helperImageInfo, err = helperimage.Get(common.REVISION, helperimage.Config{ 991 OSType: e.info.OSType, 992 Architecture: e.info.Architecture, 993 OperatingSystem: e.info.OperatingSystem, 994 }) 995 996 return err 997 } 998 999 // validateOSType checks if the ExecutorOptions metadata matches with the docker 1000 // info response. 1001 func (e *executor) validateOSType() error { 1002 executorOSType := e.ExecutorOptions.Metadata[metadataOSType] 1003 if executorOSType == "" { 1004 return common.MakeBuildError("%s does not have any OSType specified", e.Config.Executor) 1005 } 1006 1007 if executorOSType != e.info.OSType { 1008 return common.MakeBuildError( 1009 "executor requires OSType=%s, but Docker Engine supports only OSType=%s", 1010 executorOSType, e.info.OSType, 1011 ) 1012 } 1013 1014 return nil 1015 } 1016 1017 func (e *executor) createDependencies() error { 1018 createDependenciesStrategy := []func() error{ 1019 e.bindDevices, 1020 e.createVolumesManager, 1021 e.createVolumes, 1022 e.createBuildVolume, 1023 e.createServices, 1024 } 1025 1026 if e.Build.IsFeatureFlagOn(featureflags.UseLegacyVolumesMountingOrder) { 1027 // TODO: Remove in 12.6 1028 createDependenciesStrategy = []func() error{ 1029 e.bindDevices, 1030 e.createVolumesManager, 1031 e.createBuildVolume, 1032 e.createServices, 1033 e.createVolumes, 1034 } 1035 } 1036 1037 for _, setup := range createDependenciesStrategy { 1038 err := setup() 1039 if err != nil { 1040 return err 1041 } 1042 } 1043 1044 return nil 1045 } 1046 1047 func (e *executor) createVolumes() error { 1048 e.SetCurrentStage(DockerExecutorStageCreatingUserVolumes) 1049 e.Debugln("Creating user-defined volumes...") 1050 1051 if e.volumesManager == nil { 1052 return errVolumesManagerUndefined 1053 } 1054 1055 for _, volume := range e.Config.Docker.Volumes { 1056 err := e.volumesManager.Create(volume) 1057 if err == volumes.ErrCacheVolumesDisabled { 1058 e.Warningln(fmt.Sprintf( 1059 "Container based cache volumes creation is disabled. Will not create volume for %q", 1060 volume, 1061 )) 1062 continue 1063 } 1064 1065 if err != nil { 1066 return err 1067 } 1068 } 1069 1070 return nil 1071 } 1072 1073 func (e *executor) createBuildVolume() error { 1074 e.SetCurrentStage(DockerExecutorStageCreatingBuildVolumes) 1075 e.Debugln("Creating build volume...") 1076 1077 if e.volumesManager == nil { 1078 return errVolumesManagerUndefined 1079 } 1080 1081 jobsDir := e.Build.RootDir 1082 1083 // TODO: Remove in 12.3 1084 if e.Build.IsFeatureFlagOn(featureflags.UseLegacyBuildsDirForDocker) { 1085 // Cache Git sources: 1086 // take path of the projects directory, 1087 // because we use `rm -rf` which could remove the mounted volume 1088 jobsDir = path.Dir(e.Build.FullProjectDir()) 1089 } 1090 1091 var err error 1092 1093 if e.Build.GetGitStrategy() == common.GitFetch { 1094 err = e.volumesManager.Create(jobsDir) 1095 if err == nil { 1096 return nil 1097 } 1098 1099 if err == volumes.ErrCacheVolumesDisabled { 1100 err = e.volumesManager.CreateTemporary(jobsDir) 1101 } 1102 } else { 1103 err = e.volumesManager.CreateTemporary(jobsDir) 1104 } 1105 1106 if err != nil { 1107 if _, ok := err.(*volumes.ErrVolumeAlreadyDefined); !ok { 1108 return err 1109 } 1110 } 1111 1112 return nil 1113 } 1114 1115 func (e *executor) Prepare(options common.ExecutorPrepareOptions) error { 1116 e.SetCurrentStage(DockerExecutorStagePrepare) 1117 1118 if options.Config.Docker == nil { 1119 return errors.New("missing docker configuration") 1120 } 1121 1122 e.AbstractExecutor.PrepareConfiguration(options) 1123 1124 err := e.connectDocker() 1125 if err != nil { 1126 return err 1127 } 1128 1129 err = e.prepareBuildsDir(options) 1130 if err != nil { 1131 return err 1132 } 1133 1134 err = e.AbstractExecutor.PrepareBuildAndShell() 1135 if err != nil { 1136 return err 1137 } 1138 1139 if e.BuildShell.PassFile { 1140 return errors.New("docker doesn't support shells that require script file") 1141 } 1142 1143 imageName, err := e.expandImageName(e.Build.Image.Name, []string{}) 1144 if err != nil { 1145 return err 1146 } 1147 1148 e.Println("Using Docker executor with image", imageName, "...") 1149 1150 err = e.createDependencies() 1151 if err != nil { 1152 return err 1153 } 1154 return nil 1155 } 1156 1157 func (e *executor) prepareBuildsDir(options common.ExecutorPrepareOptions) error { 1158 if e.volumeParser == nil { 1159 return common.MakeBuildError("missing volume parser") 1160 } 1161 1162 isHostMounted, err := volumes.IsHostMountedVolume(e.volumeParser, e.RootDir(), options.Config.Docker.Volumes...) 1163 if err != nil { 1164 return &common.BuildError{Inner: err} 1165 } 1166 1167 // We need to set proper value for e.SharedBuildsDir because 1168 // it's required to properly start the job, what is done inside of 1169 // e.AbstractExecutor.Prepare() 1170 // And a started job is required for Volumes Manager to work, so it's 1171 // done before the manager is even created. 1172 if isHostMounted { 1173 e.SharedBuildsDir = true 1174 } 1175 1176 return nil 1177 } 1178 1179 func (e *executor) Cleanup() { 1180 e.SetCurrentStage(DockerExecutorStageCleanup) 1181 1182 var wg sync.WaitGroup 1183 1184 ctx, cancel := context.WithTimeout(context.Background(), dockerCleanupTimeout) 1185 defer cancel() 1186 1187 remove := func(id string) { 1188 wg.Add(1) 1189 go func() { 1190 e.removeContainer(ctx, id) 1191 wg.Done() 1192 }() 1193 } 1194 1195 for _, temporaryID := range e.temporary { 1196 remove(temporaryID) 1197 } 1198 1199 if e.volumesManager != nil { 1200 <-e.volumesManager.Cleanup(ctx) 1201 } 1202 1203 wg.Wait() 1204 1205 if e.client != nil { 1206 e.client.Close() 1207 } 1208 1209 e.AbstractExecutor.Cleanup() 1210 } 1211 1212 type serviceHealthCheckError struct { 1213 Inner error 1214 Logs string 1215 } 1216 1217 func (e *serviceHealthCheckError) Error() string { 1218 if e.Inner == nil { 1219 return "serviceHealthCheckError" 1220 } 1221 1222 return e.Inner.Error() 1223 } 1224 1225 func (e *executor) runServiceHealthCheckContainer(service *types.Container, timeout time.Duration) error { 1226 waitImage, err := e.getPrebuiltImage() 1227 if err != nil { 1228 return fmt.Errorf("getPrebuiltImage: %v", err) 1229 } 1230 1231 containerName := service.Names[0] + "-wait-for-service" 1232 1233 cmd := []string{"gitlab-runner-helper", "health-check"} 1234 1235 config := &container.Config{ 1236 Cmd: cmd, 1237 Image: waitImage.ID, 1238 Labels: e.getLabels("wait", "wait="+service.ID), 1239 } 1240 hostConfig := &container.HostConfig{ 1241 RestartPolicy: neverRestartPolicy, 1242 Links: []string{service.Names[0] + ":service"}, 1243 NetworkMode: container.NetworkMode(e.Config.Docker.NetworkMode), 1244 LogConfig: container.LogConfig{ 1245 Type: "json-file", 1246 }, 1247 } 1248 e.Debugln("Waiting for service container", containerName, "to be up and running...") 1249 resp, err := e.client.ContainerCreate(e.Context, config, hostConfig, nil, containerName) 1250 if err != nil { 1251 return fmt.Errorf("ContainerCreate: %v", err) 1252 } 1253 defer e.removeContainer(e.Context, resp.ID) 1254 err = e.client.ContainerStart(e.Context, resp.ID, types.ContainerStartOptions{}) 1255 if err != nil { 1256 return fmt.Errorf("ContainerStart: %v", err) 1257 } 1258 1259 waitResult := make(chan error, 1) 1260 go func() { 1261 waitResult <- e.waitForContainer(e.Context, resp.ID) 1262 }() 1263 1264 // these are warnings and they don't make the build fail 1265 select { 1266 case err := <-waitResult: 1267 if err == nil { 1268 return nil 1269 } 1270 1271 return &serviceHealthCheckError{ 1272 Inner: err, 1273 Logs: e.readContainerLogs(resp.ID), 1274 } 1275 case <-time.After(timeout): 1276 return &serviceHealthCheckError{ 1277 Inner: fmt.Errorf("service %q timeout", containerName), 1278 Logs: e.readContainerLogs(resp.ID), 1279 } 1280 } 1281 } 1282 1283 func (e *executor) waitForServiceContainer(service *types.Container, timeout time.Duration) error { 1284 err := e.runServiceHealthCheckContainer(service, timeout) 1285 if err == nil { 1286 return nil 1287 } 1288 1289 var buffer bytes.Buffer 1290 buffer.WriteString("\n") 1291 buffer.WriteString(helpers.ANSI_YELLOW + "*** WARNING:" + helpers.ANSI_RESET + " Service " + service.Names[0] + " probably didn't start properly.\n") 1292 buffer.WriteString("\n") 1293 buffer.WriteString("Health check error:\n") 1294 buffer.WriteString(strings.TrimSpace(err.Error())) 1295 buffer.WriteString("\n") 1296 1297 if healtCheckErr, ok := err.(*serviceHealthCheckError); ok { 1298 buffer.WriteString("\n") 1299 buffer.WriteString("Health check container logs:\n") 1300 buffer.WriteString(healtCheckErr.Logs) 1301 buffer.WriteString("\n") 1302 } 1303 1304 buffer.WriteString("\n") 1305 buffer.WriteString("Service container logs:\n") 1306 buffer.WriteString(e.readContainerLogs(service.ID)) 1307 buffer.WriteString("\n") 1308 1309 buffer.WriteString("\n") 1310 buffer.WriteString(helpers.ANSI_YELLOW + "*********" + helpers.ANSI_RESET + "\n") 1311 buffer.WriteString("\n") 1312 io.Copy(e.Trace, &buffer) 1313 return err 1314 } 1315 1316 func (e *executor) readContainerLogs(containerID string) string { 1317 var containerBuffer bytes.Buffer 1318 1319 options := types.ContainerLogsOptions{ 1320 ShowStdout: true, 1321 ShowStderr: true, 1322 Timestamps: true, 1323 } 1324 1325 hijacked, err := e.client.ContainerLogs(e.Context, containerID, options) 1326 if err != nil { 1327 return strings.TrimSpace(err.Error()) 1328 } 1329 defer hijacked.Close() 1330 1331 stdcopy.StdCopy(&containerBuffer, &containerBuffer, hijacked) 1332 containerLog := containerBuffer.String() 1333 return strings.TrimSpace(containerLog) 1334 }