github.com/alloyci/alloy-runner@v1.0.1-0.20180222164613-925503ccafd6/executors/docker/executor_docker.go

     1  package docker
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"crypto/md5"
     7  	"errors"
     8  	"fmt"
     9  	"io"
    10  	"path"
    11  	"path/filepath"
    12  	"regexp"
    13  	"runtime"
    14  	"strconv"
    15  	"strings"
    16  	"sync"
    17  	"time"
    18  
    19  	"github.com/docker/distribution/reference"
    20  	"github.com/docker/docker/api/types"
    21  	"github.com/docker/docker/api/types/container"
    22  	"github.com/docker/docker/pkg/stdcopy"
    23  	"github.com/mattn/go-zglob"
    24  
    25  	"gitlab.com/gitlab-org/gitlab-runner/common"
    26  	"gitlab.com/gitlab-org/gitlab-runner/executors"
    27  	"gitlab.com/gitlab-org/gitlab-runner/helpers"
    28  	docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
    29  )
    30  
    31  const (
    32  	DockerExecutorStagePrepare common.ExecutorStage = "docker_prepare"
    33  	DockerExecutorStageRun     common.ExecutorStage = "docker_run"
    34  	DockerExecutorStageCleanup common.ExecutorStage = "docker_cleanup"
    35  
    36  	DockerExecutorStageCreatingBuildVolumes common.ExecutorStage = "docker_creating_build_volumes"
    37  	DockerExecutorStageCreatingServices     common.ExecutorStage = "docker_creating_services"
    38  	DockerExecutorStageCreatingUserVolumes  common.ExecutorStage = "docker_creating_user_volumes"
    39  	DockerExecutorStagePullingImage         common.ExecutorStage = "docker_pulling_image"
    40  )
    41  
    42  var neverRestartPolicy = container.RestartPolicy{Name: "no"}
    43  
    44  type executor struct {
    45  	executors.AbstractExecutor
    46  	client docker_helpers.Client
    47  	info   types.Info
    48  
    49  	temporary []string // IDs of containers that should be removed
    50  
    51  	builds   []string // IDs of successfully created build containers
    52  	services []*types.Container
    53  	caches   []string // IDs of cache containers
    54  
    55  	binds []string
    56  	links []string
    57  
    58  	devices []container.DeviceMapping
    59  
    60  	usedImages     map[string]string
    61  	usedImagesLock sync.RWMutex
    62  }
    63  
    64  func (s *executor) getServiceVariables() []string {
    65  	return s.Build.GetAllVariables().PublicOrInternal().StringList()
    66  }
    67  
    68  func (s *executor) getUserAuthConfiguration(indexName string) *types.AuthConfig {
    69  	if s.Build == nil {
    70  		return nil
    71  	}
    72  
    73  	buf := bytes.NewBufferString(s.Build.GetDockerAuthConfig())
    74  	authConfigs, _ := docker_helpers.ReadAuthConfigsFromReader(buf)
    75  	if authConfigs != nil {
    76  		return docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs)
    77  	}
    78  	return nil
    79  }
    80  
    81  func (s *executor) getBuildAuthConfiguration(indexName string) *types.AuthConfig {
    82  	if s.Build == nil {
    83  		return nil
    84  	}
    85  
    86  	authConfigs := make(map[string]types.AuthConfig)
    87  
    88  	for _, credentials := range s.Build.Credentials {
    89  		if credentials.Type != "registry" {
    90  			continue
    91  		}
    92  
    93  		authConfigs[credentials.URL] = types.AuthConfig{
    94  			Username:      credentials.Username,
    95  			Password:      credentials.Password,
    96  			ServerAddress: credentials.URL,
    97  		}
    98  	}
    99  
   100  	if len(authConfigs) > 0 {
   101  		return docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs)
   102  	}
   103  	return nil
   104  }
   105  
   106  func (s *executor) getHomeDirAuthConfiguration(indexName string) *types.AuthConfig {
   107  	authConfigs, _ := docker_helpers.ReadDockerAuthConfigsFromHomeDir(s.Shell().User)
   108  	if authConfigs != nil {
   109  		return docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs)
   110  	}
   111  	return nil
   112  }
   113  
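        // getAuthConfig resolves registry credentials for imageName by checking, in order:
        // the user-provided Docker auth configuration (DOCKER_AUTH_CONFIG), the Docker
        // config in the runner user's home directory, and the registry credentials
        // attached to the job.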
   114  func (s *executor) getAuthConfig(imageName string) *types.AuthConfig {
   115  	indexName, _ := docker_helpers.SplitDockerImageName(imageName)
   116  
   117  	authConfig := s.getUserAuthConfiguration(indexName)
   118  	if authConfig == nil {
   119  		authConfig = s.getHomeDirAuthConfiguration(indexName)
   120  	}
   121  	if authConfig == nil {
   122  		authConfig = s.getBuildAuthConfiguration(indexName)
   123  	}
   124  
   125  	if authConfig != nil {
   126  		s.Debugln("Using", authConfig.Username, "to connect to", authConfig.ServerAddress,
   127  			"in order to resolve", imageName, "...")
   128  		return authConfig
   129  	}
   130  
   131  	s.Debugln(fmt.Sprintf("No credentials found for %v", indexName))
   132  	return nil
   133  }
   134  
   135  func (s *executor) pullDockerImage(imageName string, ac *types.AuthConfig) (*types.ImageInspect, error) {
   136  	s.SetCurrentStage(DockerExecutorStagePullingImage)
   137  	s.Println("Pulling docker image", imageName, "...")
   138  
   139  	ref := imageName
   140  	// Add :latest to limit the download results
   141  	if !strings.ContainsAny(ref, ":@") {
   142  		ref += ":latest"
   143  	}
   144  
   145  	options := types.ImagePullOptions{}
   146  	if ac != nil {
   147  		options.RegistryAuth, _ = docker_helpers.EncodeAuthConfig(ac)
   148  	}
   149  
   150  	errorRegexp := regexp.MustCompile("(repository does not exist|not found)")
   151  	if err := s.client.ImagePullBlocking(s.Context, ref, options); err != nil {
   152  		if errorRegexp.MatchString(err.Error()) {
   153  			return nil, &common.BuildError{Inner: err}
   154  		}
   155  		return nil, err
   156  	}
   157  
   158  	image, _, err := s.client.ImageInspectWithRaw(s.Context, imageName)
   159  	return &image, err
   160  }
   161  
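        // getDockerImage returns the image to use, honouring the configured pull policy
        // ("never", "if-not-present" or "always") and skipping the pull when the exact
        // image ID has already been used by this executor.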
   162  func (s *executor) getDockerImage(imageName string) (image *types.ImageInspect, err error) {
   163  	pullPolicy, err := s.Config.Docker.PullPolicy.Get()
   164  	if err != nil {
   165  		return nil, err
   166  	}
   167  
   168  	authConfig := s.getAuthConfig(imageName)
   169  
   170  	s.Debugln("Looking for image", imageName, "...")
   171  	existingImage, _, err := s.client.ImageInspectWithRaw(s.Context, imageName)
   172  
   173  	// Return early if we already used that image
   174  	if err == nil && s.wasImageUsed(imageName, existingImage.ID) {
   175  		return &existingImage, nil
   176  	}
   177  
   178  	defer func() {
   179  		if err == nil {
   180  			s.markImageAsUsed(imageName, image.ID)
   181  		}
   182  	}()
   183  
   184  	// If the "never" pull policy is set, return whatever inspect returned
   185  	if pullPolicy == common.PullPolicyNever {
   186  		return &existingImage, err
   187  	}
   188  
   189  	if err == nil {
   190  		// Don't pull image that is passed by ID
   191  		if existingImage.ID == imageName {
   192  			return &existingImage, nil
   193  		}
   194  
   195  		// If not-present is specified
   196  		if pullPolicy == common.PullPolicyIfNotPresent {
   197  			s.Println("Using locally found image version due to if-not-present pull policy")
   198  			return &existingImage, err
   199  		}
   200  	}
   201  
   202  	return s.pullDockerImage(imageName, authConfig)
   203  }
   204  
   205  func (s *executor) expandAndGetDockerImage(imageName string, allowedImages []string) (*types.ImageInspect, error) {
   206  	imageName, err := s.expandImageName(imageName, allowedImages)
   207  	if err != nil {
   208  		return nil, err
   209  	}
   210  
   211  	image, err := s.getDockerImage(imageName)
   212  	if err != nil {
   213  		return nil, err
   214  	}
   215  
   216  	return image, nil
   217  }
   218  
   219  func (s *executor) getArchitecture() string {
   220  	architecture := s.info.Architecture
   221  	switch architecture {
   222  	case "armv6l", "armv7l", "aarch64":
   223  		architecture = "arm"
   224  	case "amd64":
   225  		architecture = "x86_64"
   226  	}
   227  
   228  	if architecture != "" {
   229  		return architecture
   230  	}
   231  
   232  	switch runtime.GOARCH {
   233  	case "amd64":
   234  		return "x86_64"
   235  	default:
   236  		return runtime.GOARCH
   237  	}
   238  }
   239  
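        // getPrebuiltImage returns the helper image: either the helper_image configured
        // for the runner, or the bundled prebuilt image matching the daemon's
        // architecture, which is imported into the daemon if it is not present yet.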
   240  func (s *executor) getPrebuiltImage() (*types.ImageInspect, error) {
   241  	if imageNameFromConfig := s.Config.Docker.HelperImage; imageNameFromConfig != "" {
   242  		s.Debugln("Pulling configured helper_image for predefined container instead of importing the bundled image", imageNameFromConfig, "...")
   243  		return s.getDockerImage(imageNameFromConfig)
   244  	}
   245  
   246  	architecture := s.getArchitecture()
   247  	if architecture == "" {
   248  		return nil, errors.New("unsupported docker architecture")
   249  	}
   250  
   251  	imageName := prebuiltImageName + ":" + architecture + "-" + common.REVISION
   252  	s.Debugln("Looking for prebuilt image", imageName, "...")
   253  	image, _, err := s.client.ImageInspectWithRaw(s.Context, imageName)
   254  	if err == nil {
   255  		return &image, nil
   256  	}
   257  
   258  	data, err := Asset("prebuilt-" + architecture + prebuiltImageExtension)
   259  	if err != nil {
   260  		return nil, fmt.Errorf("Unsupported architecture: %s: %q", architecture, err.Error())
   261  	}
   262  
   263  	s.Debugln("Loading prebuilt image...")
   264  
   265  	ref := prebuiltImageName
   266  	source := types.ImageImportSource{
   267  		Source:     bytes.NewBuffer(data),
   268  		SourceName: "-",
   269  	}
   270  	options := types.ImageImportOptions{
   271  		Tag: architecture + "-" + common.REVISION,
   272  	}
   273  
   274  	if err := s.client.ImageImportBlocking(s.Context, source, ref, options); err != nil {
   275  		return nil, fmt.Errorf("Failed to import image: %s", err)
   276  	}
   277  
   278  	image, _, err = s.client.ImageInspectWithRaw(s.Context, imageName)
   279  	if err != nil {
   280  		s.Debugln("Inspecting imported image", imageName, "failed:", err)
   281  		return nil, err
   282  	}
   283  
   284  	return &image, err
   285  }
   286  
   287  func (s *executor) getBuildImage() (*types.ImageInspect, error) {
   288  	imageName, err := s.expandImageName(s.Build.Image.Name, []string{})
   289  	if err != nil {
   290  		return nil, err
   291  	}
   292  
   293  	// Fetch image
   294  	image, err := s.getDockerImage(imageName)
   295  	if err != nil {
   296  		return nil, err
   297  	}
   298  
   299  	return image, nil
   300  }
   301  
   302  func (s *executor) getAbsoluteContainerPath(dir string) string {
   303  	if path.IsAbs(dir) {
   304  		return dir
   305  	}
   306  	return path.Join(s.Build.FullProjectDir(), dir)
   307  }
   308  
   309  func (s *executor) addHostVolume(hostPath, containerPath string) error {
   310  	containerPath = s.getAbsoluteContainerPath(containerPath)
   311  	s.Debugln("Using host-based", hostPath, "for", containerPath, "...")
   312  	s.binds = append(s.binds, fmt.Sprintf("%v:%v", hostPath, containerPath))
   313  	return nil
   314  }
   315  
   316  func (s *executor) getLabels(containerType string, otherLabels ...string) map[string]string {
   317  	labels := make(map[string]string)
   318  	labels[dockerLabelPrefix+".job.id"] = strconv.Itoa(s.Build.ID)
   319  	labels[dockerLabelPrefix+".job.sha"] = s.Build.GitInfo.Sha
   320  	labels[dockerLabelPrefix+".job.before_sha"] = s.Build.GitInfo.BeforeSha
   321  	labels[dockerLabelPrefix+".job.ref"] = s.Build.GitInfo.Ref
   322  	labels[dockerLabelPrefix+".project.id"] = strconv.Itoa(s.Build.JobInfo.ProjectID)
   323  	labels[dockerLabelPrefix+".runner.id"] = s.Build.Runner.ShortDescription()
   324  	labels[dockerLabelPrefix+".runner.local_id"] = strconv.Itoa(s.Build.RunnerID)
   325  	labels[dockerLabelPrefix+".type"] = containerType
   326  	for _, label := range otherLabels {
   327  		keyValue := strings.SplitN(label, "=", 2)
   328  		if len(keyValue) == 2 {
   329  			labels[dockerLabelPrefix+"."+keyValue[0]] = keyValue[1]
   330  		}
   331  	}
   332  	return labels
   333  }
   334  
   335  // createCacheVolume returns the id of the created container, or an error
   336  func (s *executor) createCacheVolume(containerName, containerPath string) (string, error) {
   337  	// get the prebuilt helper image used to create the cache volume container
   338  	cacheImage, err := s.getPrebuiltImage()
   339  	if err != nil {
   340  		return "", err
   341  	}
   342  
   343  	config := &container.Config{
   344  		Image: cacheImage.ID,
   345  		Cmd: []string{
   346  			"gitlab-runner-cache", containerPath,
   347  		},
   348  		Volumes: map[string]struct{}{
   349  			containerPath: {},
   350  		},
   351  		Labels: s.getLabels("cache", "cache.dir="+containerPath),
   352  	}
   353  
   354  	hostConfig := &container.HostConfig{
   355  		LogConfig: container.LogConfig{
   356  			Type: "json-file",
   357  		},
   358  	}
   359  
   360  	resp, err := s.client.ContainerCreate(s.Context, config, hostConfig, nil, containerName)
   361  	if err != nil {
   362  		if resp.ID != "" {
   363  			s.temporary = append(s.temporary, resp.ID)
   364  		}
   365  		return "", err
   366  	}
   367  
   368  	s.Debugln("Starting cache container", resp.ID, "...")
   369  	err = s.client.ContainerStart(s.Context, resp.ID, types.ContainerStartOptions{})
   370  	if err != nil {
   371  		s.temporary = append(s.temporary, resp.ID)
   372  		return "", err
   373  	}
   374  
   375  	s.Debugln("Waiting for cache container", resp.ID, "...")
   376  	err = s.waitForContainer(resp.ID)
   377  	if err != nil {
   378  		s.temporary = append(s.temporary, resp.ID)
   379  		return "", err
   380  	}
   381  
   382  	return resp.ID, nil
   383  }
   384  
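        // addCacheVolume mounts a cache for containerPath: a host directory bind when
        // cache_dir is configured, otherwise a dedicated cache container that is reused
        // between builds of the same project (unless disable_cache is set).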
   385  func (s *executor) addCacheVolume(containerPath string) error {
   386  	var err error
   387  	containerPath = s.getAbsoluteContainerPath(containerPath)
   388  
   389  	// disable_cache only disables the automatic container cache; host volumes are left as-is (they are shared on purpose)
   390  	if s.Config.Docker.DisableCache {
   391  		s.Debugln("Container cache for", containerPath, " is disabled.")
   392  		return nil
   393  	}
   394  
   395  	hash := md5.Sum([]byte(containerPath))
   396  
   397  	// use host-based cache
   398  	if cacheDir := s.Config.Docker.CacheDir; cacheDir != "" {
   399  		hostPath := fmt.Sprintf("%s/%s/%x", cacheDir, s.Build.ProjectUniqueName(), hash)
   400  		hostPath, err := filepath.Abs(hostPath)
   401  		if err != nil {
   402  			return err
   403  		}
   404  		s.Debugln("Using path", hostPath, "as cache for", containerPath, "...")
   405  		s.binds = append(s.binds, fmt.Sprintf("%v:%v", filepath.ToSlash(hostPath), containerPath))
   406  		return nil
   407  	}
   408  
   409  	// get existing cache container
   410  	var containerID string
   411  	containerName := fmt.Sprintf("%s-cache-%x", s.Build.ProjectUniqueName(), hash)
   412  	if inspected, err := s.client.ContainerInspect(s.Context, containerName); err == nil {
   413  		// check if we have valid cache, if not remove the broken container
   414  		if _, ok := inspected.Config.Volumes[containerPath]; !ok {
   415  			s.Debugln("Removing broken cache container for ", containerPath, "path")
   416  			s.removeContainer(s.Context, inspected.ID)
   417  		} else {
   418  			containerID = inspected.ID
   419  		}
   420  	}
   421  
   422  	// create new cache container for that project
   423  	if containerID == "" {
   424  		containerID, err = s.createCacheVolume(containerName, containerPath)
   425  		if err != nil {
   426  			return err
   427  		}
   428  	}
   429  
   430  	s.Debugln("Using container", containerID, "as cache", containerPath, "...")
   431  	s.caches = append(s.caches, containerID)
   432  	return nil
   433  }
   434  
   435  func (s *executor) addVolume(volume string) error {
   436  	var err error
   437  	hostVolume := strings.SplitN(volume, ":", 2)
   438  	switch len(hostVolume) {
   439  	case 2:
   440  		err = s.addHostVolume(hostVolume[0], hostVolume[1])
   441  
   442  	case 1:
   443  		// a single path adds a cache volume (host- or container-based; see addCacheVolume)
   444  		err = s.addCacheVolume(hostVolume[0])
   445  	}
   446  
   447  	if err != nil {
   448  		s.Errorln("Failed to create container volume for", volume, err)
   449  	}
   450  	return err
   451  }
   452  
   453  func fakeContainer(id string, names ...string) *types.Container {
   454  	return &types.Container{ID: id, Names: names}
   455  }
   456  
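        // createBuildVolume prepares the volume that holds the checked-out sources: a
        // persistent cache volume for the git-fetch strategy, or a temporary cache
        // container otherwise; nothing is created when the builds dir is host-mounted.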
   457  func (s *executor) createBuildVolume() error {
   458  	// Cache Git sources:
   459  	// take the path of the parent (projects) directory,
   460  	// because we use `rm -rf`, which could otherwise remove the mounted volume
   461  	parentDir := path.Dir(s.Build.FullProjectDir())
   462  
   463  	if !path.IsAbs(parentDir) || parentDir == "/" {
   464  		return errors.New("build directory needs to be absolute and non-root path")
   465  	}
   466  
   467  	if s.isHostMountedVolume(s.Build.RootDir, s.Config.Docker.Volumes...) {
   468  		return nil
   469  	}
   470  
   471  	if s.Build.GetGitStrategy() == common.GitFetch && !s.Config.Docker.DisableCache {
   472  		// create persistent cache container
   473  		return s.addVolume(parentDir)
   474  	}
   475  
   476  	// create temporary cache container
   477  	id, err := s.createCacheVolume("", parentDir)
   478  	if err != nil {
   479  		return err
   480  	}
   481  
   482  	s.caches = append(s.caches, id)
   483  	s.temporary = append(s.temporary, id)
   484  
   485  	return nil
   486  }
   487  
   488  func (s *executor) createUserVolumes() (err error) {
   489  	for _, volume := range s.Config.Docker.Volumes {
   490  		err = s.addVolume(volume)
   491  		if err != nil {
   492  			return
   493  		}
   494  	}
   495  	return nil
   496  }
   497  
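        // isHostMountedVolume reports whether dir lives inside any of the host-mounted
        // ("host:container") entries in volumes.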
   498  func (s *executor) isHostMountedVolume(dir string, volumes ...string) bool {
   499  	isParentOf := func(parent string, dir string) bool {
   500  		for dir != "/" && dir != "." {
   501  			if dir == parent {
   502  				return true
   503  			}
   504  			dir = path.Dir(dir)
   505  		}
   506  		return false
   507  	}
   508  
   509  	for _, volume := range volumes {
   510  		hostVolume := strings.Split(volume, ":")
   511  		if len(hostVolume) < 2 {
   512  			continue
   513  		}
   514  
   515  		if isParentOf(path.Clean(hostVolume[1]), path.Clean(dir)) {
   516  			return true
   517  		}
   518  	}
   519  	return false
   520  }
   521  
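        // parseDeviceString parses a --device style specification, e.g. "/dev/kvm" becomes
        // {PathOnHost: "/dev/kvm", PathInContainer: "/dev/kvm", CgroupPermissions: "rwm"}.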
   522  func (s *executor) parseDeviceString(deviceString string) (device container.DeviceMapping, err error) {
   523  	// Split the device string PathOnHost[:PathInContainer[:CgroupPermissions]]
   524  	parts := strings.Split(deviceString, ":")
   525  
   526  	if len(parts) > 3 {
   527  		err = fmt.Errorf("Too many colons")
   528  		return
   529  	}
   530  
   531  	device.PathOnHost = parts[0]
   532  
   533  	// Optional container path
   534  	if len(parts) >= 2 {
   535  		device.PathInContainer = parts[1]
   536  	} else {
   537  		// default: device at same path in container
   538  		device.PathInContainer = device.PathOnHost
   539  	}
   540  
   541  	// Optional permissions
   542  	if len(parts) >= 3 {
   543  		device.CgroupPermissions = parts[2]
   544  	} else {
   545  		// default: rwm, just like 'docker run'
   546  		device.CgroupPermissions = "rwm"
   547  	}
   548  
   549  	return
   550  }
   551  
   552  func (s *executor) bindDevices() (err error) {
   553  	for _, deviceString := range s.Config.Docker.Devices {
   554  		device, err := s.parseDeviceString(deviceString)
   555  		if err != nil {
   556  			err = fmt.Errorf("Failed to parse device string %q: %s", deviceString, err)
   557  			return err
   558  		}
   559  
   560  		s.devices = append(s.devices, device)
   561  	}
   562  	return nil
   563  }
   564  
   565  func (s *executor) wasImageUsed(imageName, imageID string) bool {
   566  	s.usedImagesLock.RLock()
   567  	defer s.usedImagesLock.RUnlock()
   568  
   569  	return s.usedImages[imageName] == imageID
   573  }
   574  
   575  func (s *executor) markImageAsUsed(imageName, imageID string) {
   576  	s.usedImagesLock.Lock()
   577  	defer s.usedImagesLock.Unlock()
   578  
   579  	if s.usedImages == nil {
   580  		s.usedImages = make(map[string]string)
   581  	}
   582  	s.usedImages[imageName] = imageID
   583  
   584  	if imageName != imageID {
   585  		s.Println("Using docker image", imageID, "for", imageName, "...")
   586  	}
   587  }
   588  
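        // splitServiceAndVersion parses a service description. For example, "tutum/wordpress"
        // yields service "tutum/wordpress", version "latest", imageName "tutum/wordpress:latest"
        // and the link names "tutum__wordpress" and "tutum-wordpress"; a registry port, if any,
        // is stripped from the service name.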
   589  func (s *executor) splitServiceAndVersion(serviceDescription string) (service, version, imageName string, linkNames []string) {
   590  	ReferenceRegexpNoPort := regexp.MustCompile(`^(.*?)(|:[0-9]+)(|/.*)$`)
   591  	imageName = serviceDescription
   592  	version = "latest"
   593  
   594  	if match := reference.ReferenceRegexp.FindStringSubmatch(serviceDescription); match != nil {
   595  		matchService := ReferenceRegexpNoPort.FindStringSubmatch(match[1])
   596  		service = matchService[1] + matchService[3]
   597  
   598  		if len(match[2]) > 0 {
   599  			version = match[2]
   600  		} else {
   601  			imageName = match[1] + ":" + version
   602  		}
   603  	} else {
   604  		return
   605  	}
   606  
   607  	linkName := strings.Replace(service, "/", "__", -1)
   608  	linkNames = append(linkNames, linkName)
   609  
   610  	// Create an alternative link name according to RFC 1123,
   611  	// which allows only `a-zA-Z0-9-`
   612  	if alternativeName := strings.Replace(service, "/", "-", -1); linkName != alternativeName {
   613  		linkNames = append(linkNames, alternativeName)
   614  	}
   615  	return
   616  }
   617  
   618  func (s *executor) createService(serviceIndex int, service, version, image string, serviceDefinition common.Image) (*types.Container, error) {
   619  	if len(service) == 0 {
   620  		return nil, errors.New("invalid service name")
   621  	}
   622  
   623  	s.Println("Starting service", service+":"+version, "...")
   624  	serviceImage, err := s.getDockerImage(image)
   625  	if err != nil {
   626  		return nil, err
   627  	}
   628  
   629  	serviceSlug := strings.Replace(service, "/", "__", -1)
   630  	containerName := fmt.Sprintf("%s-%s-%d", s.Build.ProjectUniqueName(), serviceSlug, serviceIndex)
   631  
   632  	// this may fail some builds if there is a name collision
   633  	s.removeContainer(s.Context, containerName)
   634  
   635  	config := &container.Config{
   636  		Image:  serviceImage.ID,
   637  		Labels: s.getLabels("service", "service="+service, "service.version="+version),
   638  		Env:    s.getServiceVariables(),
   639  	}
   640  
   641  	if len(serviceDefinition.Command) > 0 {
   642  		config.Cmd = serviceDefinition.Command
   643  	}
   644  	if len(serviceDefinition.Entrypoint) > 0 {
   645  		config.Entrypoint = serviceDefinition.Entrypoint
   646  	}
   647  
   648  	hostConfig := &container.HostConfig{
   649  		RestartPolicy: neverRestartPolicy,
   650  		Privileged:    s.Config.Docker.Privileged,
   651  		NetworkMode:   container.NetworkMode(s.Config.Docker.NetworkMode),
   652  		Binds:         s.binds,
   653  		ShmSize:       s.Config.Docker.ShmSize,
   654  		VolumesFrom:   s.caches,
   655  		Tmpfs:         s.Config.Docker.ServicesTmpfs,
   656  		LogConfig: container.LogConfig{
   657  			Type: "json-file",
   658  		},
   659  	}
   660  
   661  	s.Debugln("Creating service container", containerName, "...")
   662  	resp, err := s.client.ContainerCreate(s.Context, config, hostConfig, nil, containerName)
   663  	if err != nil {
   664  		return nil, err
   665  	}
   666  
   667  	s.Debugln("Starting service container", resp.ID, "...")
   668  	err = s.client.ContainerStart(s.Context, resp.ID, types.ContainerStartOptions{})
   669  	if err != nil {
   670  		s.temporary = append(s.temporary, resp.ID)
   671  		return nil, err
   672  	}
   673  
   674  	return fakeContainer(resp.ID, containerName), nil
   675  }
   676  
   677  func (s *executor) getServicesDefinitions() (common.Services, error) {
   678  	serviceDefinitions := common.Services{}
   679  	for _, service := range s.Config.Docker.Services {
   680  		serviceDefinitions = append(serviceDefinitions, common.Image{Name: service})
   681  	}
   682  
   683  	for _, service := range s.Build.Services {
   684  		serviceName := s.Build.GetAllVariables().ExpandValue(service.Name)
   685  		err := s.verifyAllowedImage(serviceName, "services", s.Config.Docker.AllowedServices, s.Config.Docker.Services)
   686  		if err != nil {
   687  			return nil, err
   688  		}
   689  
   690  		service.Name = serviceName
   691  		serviceDefinitions = append(serviceDefinitions, service)
   692  	}
   693  
   694  	return serviceDefinitions, nil
   695  }
   696  
   697  func (s *executor) waitForServices() {
   698  	waitForServicesTimeout := s.Config.Docker.WaitForServicesTimeout
   699  	if waitForServicesTimeout == 0 {
   700  		waitForServicesTimeout = common.DefaultWaitForServicesTimeout
   701  	}
   702  
   703  	// wait for all services to come up
   704  	if waitForServicesTimeout > 0 && len(s.services) > 0 {
   705  		s.Println("Waiting for services to be up and running...")
   706  		wg := sync.WaitGroup{}
   707  		for _, service := range s.services {
   708  			wg.Add(1)
   709  			go func(service *types.Container) {
   710  				s.waitForServiceContainer(service, time.Duration(waitForServicesTimeout)*time.Second)
   711  				wg.Done()
   712  			}(service)
   713  		}
   714  		wg.Wait()
   715  	}
   716  }
   717  
   718  func (s *executor) buildServiceLinks(linksMap map[string]*types.Container) (links []string) {
   719  	for linkName, linkee := range linksMap {
   720  		newContainer, err := s.client.ContainerInspect(s.Context, linkee.ID)
   721  		if err != nil {
   722  			continue
   723  		}
   724  		if newContainer.State.Running {
   725  			links = append(links, linkee.ID+":"+linkName)
   726  		}
   727  	}
   728  	return
   729  }
   730  
   731  func (s *executor) createFromServiceDefinition(serviceIndex int, serviceDefinition common.Image, linksMap map[string]*types.Container) (err error) {
   732  	var container *types.Container
   733  
   734  	service, version, imageName, linkNames := s.splitServiceAndVersion(serviceDefinition.Name)
   735  
   736  	if serviceDefinition.Alias != "" {
   737  		linkNames = append(linkNames, serviceDefinition.Alias)
   738  	}
   739  
   740  	for _, linkName := range linkNames {
   741  		if linksMap[linkName] != nil {
   742  			s.Warningln("Service", serviceDefinition.Name, "is already created. Ignoring.")
   743  			continue
   744  		}
   745  
   746  		// Create service if not yet created
   747  		if container == nil {
   748  			container, err = s.createService(serviceIndex, service, version, imageName, serviceDefinition)
   749  			if err != nil {
   750  				return
   751  			}
   752  			s.Debugln("Created service", serviceDefinition.Name, "as", container.ID)
   753  			s.services = append(s.services, container)
   754  			s.temporary = append(s.temporary, container.ID)
   755  		}
   756  		linksMap[linkName] = container
   757  	}
   758  	return
   759  }
   760  
   761  func (s *executor) createServices() (err error) {
   762  	servicesDefinitions, err := s.getServicesDefinitions()
   763  	if err != nil {
   764  		return
   765  	}
   766  
   767  	linksMap := make(map[string]*types.Container)
   768  
   769  	for index, serviceDefinition := range servicesDefinitions {
   770  		err = s.createFromServiceDefinition(index, serviceDefinition, linksMap)
   771  		if err != nil {
   772  			return
   773  		}
   774  	}
   775  
   776  	s.waitForServices()
   777  
   778  	s.links = s.buildServiceLinks(linksMap)
   779  	return
   780  }
   781  
   782  func (s *executor) getValidContainers(containers []string) []string {
   783  	var newContainers []string
   784  
   785  	for _, container := range containers {
   786  		if _, err := s.client.ContainerInspect(s.Context, container); err == nil {
   787  			newContainers = append(newContainers, container)
   788  		}
   789  	}
   790  
   791  	return newContainers
   792  }
   793  
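        // createContainer creates (but does not start) a container of the given type,
        // wiring it to the service links, the cache volumes and, for later stages, the
        // volumes of the previous build container.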
   794  func (s *executor) createContainer(containerType string, imageDefinition common.Image, cmd []string, allowedInternalImages []string) (*types.ContainerJSON, error) {
   795  	image, err := s.expandAndGetDockerImage(imageDefinition.Name, allowedInternalImages)
   796  	if err != nil {
   797  		return nil, err
   798  	}
   799  
   800  	hostname := s.Config.Docker.Hostname
   801  	if hostname == "" {
   802  		hostname = s.Build.ProjectUniqueName()
   803  	}
   804  
   805  	// Always create a unique but sequential name
   806  	containerIndex := len(s.builds)
   807  	containerName := s.Build.ProjectUniqueName() + "-" +
   808  		containerType + "-" + strconv.Itoa(containerIndex)
   809  
   810  	config := &container.Config{
   811  		Image:        image.ID,
   812  		Hostname:     hostname,
   813  		Cmd:          cmd,
   814  		Labels:       s.getLabels(containerType),
   815  		Tty:          false,
   816  		AttachStdin:  true,
   817  		AttachStdout: true,
   818  		AttachStderr: true,
   819  		OpenStdin:    true,
   820  		StdinOnce:    true,
   821  		Env:          append(s.Build.GetAllVariables().StringList(), s.BuildShell.Environment...),
   822  	}
   823  
   824  	if len(imageDefinition.Entrypoint) > 0 {
   825  		config.Entrypoint = imageDefinition.Entrypoint
   826  	}
   827  
   828  	nanoCPUs, err := s.Config.Docker.GetNanoCPUs()
   829  	if err != nil {
   830  		return nil, err
   831  	}
   832  
   833  	// By default we use the cache containers,
   834  	// but in later stages we attach to the previous build container
   835  	volumesFrom := s.caches
   836  	if len(s.builds) > 0 {
   837  		volumesFrom = []string{
   838  			s.builds[len(s.builds)-1],
   839  		}
   840  	}
   841  
   842  	hostConfig := &container.HostConfig{
   843  		Resources: container.Resources{
   844  			CpusetCpus: s.Config.Docker.CPUSetCPUs,
   845  			NanoCPUs:   nanoCPUs,
   846  			Devices:    s.devices,
   847  		},
   848  		DNS:           s.Config.Docker.DNS,
   849  		DNSSearch:     s.Config.Docker.DNSSearch,
   850  		Runtime:       s.Config.Docker.Runtime,
   851  		Privileged:    s.Config.Docker.Privileged,
   852  		UsernsMode:    container.UsernsMode(s.Config.Docker.UsernsMode),
   853  		CapAdd:        s.Config.Docker.CapAdd,
   854  		CapDrop:       s.Config.Docker.CapDrop,
   855  		SecurityOpt:   s.Config.Docker.SecurityOpt,
   856  		RestartPolicy: neverRestartPolicy,
   857  		ExtraHosts:    s.Config.Docker.ExtraHosts,
   858  		NetworkMode:   container.NetworkMode(s.Config.Docker.NetworkMode),
   859  		Links:         append(s.Config.Docker.Links, s.links...),
   860  		Binds:         s.binds,
   861  		ShmSize:       s.Config.Docker.ShmSize,
   862  		VolumeDriver:  s.Config.Docker.VolumeDriver,
   863  		VolumesFrom:   append(s.Config.Docker.VolumesFrom, volumesFrom...),
   864  		LogConfig: container.LogConfig{
   865  			Type: "json-file",
   866  		},
   867  		Tmpfs:   s.Config.Docker.Tmpfs,
   868  		Sysctls: s.Config.Docker.SysCtls,
   869  	}
   870  
   871  	// this may fail some builds if there is a name collision
   872  	s.removeContainer(s.Context, containerName)
   873  
   874  	s.Debugln("Creating container", containerName, "...")
   875  	resp, err := s.client.ContainerCreate(s.Context, config, hostConfig, nil, containerName)
   876  	if err != nil {
   877  		if resp.ID != "" {
   878  			s.temporary = append(s.temporary, resp.ID)
   879  		}
   880  		return nil, err
   881  	}
   882  
   883  	inspect, err := s.client.ContainerInspect(s.Context, resp.ID)
   884  	if err != nil {
   885  		s.temporary = append(s.temporary, resp.ID)
   886  		return nil, err
   887  	}
   888  
   889  	s.builds = append(s.builds, resp.ID)
   890  	s.temporary = append(s.temporary, resp.ID)
   891  	return &inspect, nil
   892  }
   893  
   894  func (s *executor) killContainer(id string, waitCh chan error) (err error) {
   895  	for {
   896  		s.disconnectNetwork(s.Context, id)
   897  		s.Debugln("Killing container", id, "...")
   898  		s.client.ContainerKill(s.Context, id, "SIGKILL")
   899  
   900  		// Wait for the signal that the container was killed,
   901  		// or retry after some time
   902  		select {
   903  		case err = <-waitCh:
   904  			return
   905  
   906  		case <-time.After(time.Second):
   907  		}
   908  	}
   909  }
   910  
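        // waitForContainer polls the container until it stops; a non-zero exit code is
        // returned as a common.BuildError.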
   911  func (s *executor) waitForContainer(id string) error {
   912  	s.Debugln("Waiting for container", id, "...")
   913  
   914  	retries := 0
   915  
   916  	// Use active wait
   917  	for {
   918  		container, err := s.client.ContainerInspect(s.Context, id)
   919  		if err != nil {
   920  			if docker_helpers.IsErrNotFound(err) {
   921  				return err
   922  			}
   923  
   924  			if retries > 3 {
   925  				return err
   926  			}
   927  
   928  			retries++
   929  			time.Sleep(time.Second)
   930  			continue
   931  		}
   932  
   933  		// Reset the retry counter
   934  		retries = 0
   935  
   936  		if container.State.Running {
   937  			time.Sleep(time.Second)
   938  			continue
   939  		}
   940  
   941  		if container.State.ExitCode != 0 {
   942  			return &common.BuildError{
   943  				Inner: fmt.Errorf("exit code %d", container.State.ExitCode),
   944  			}
   945  		}
   946  
   947  		return nil
   948  	}
   949  }
   950  
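        // watchContainer attaches to the container, starts it, streams its output to the
        // build trace, writes input to its stdin, and returns when the container
        // finishes, the attach breaks, or the context is cancelled.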
   951  func (s *executor) watchContainer(ctx context.Context, id string, input io.Reader) (err error) {
   952  	options := types.ContainerAttachOptions{
   953  		Stream: true,
   954  		Stdin:  true,
   955  		Stdout: true,
   956  		Stderr: true,
   957  	}
   958  
   959  	s.Debugln("Attaching to container", id, "...")
   960  	hijacked, err := s.client.ContainerAttach(ctx, id, options)
   961  	if err != nil {
   962  		return
   963  	}
   964  	defer hijacked.Close()
   965  
   966  	s.Debugln("Starting container", id, "...")
   967  	err = s.client.ContainerStart(ctx, id, types.ContainerStartOptions{})
   968  	if err != nil {
   969  		return
   970  	}
   971  
   972  	s.Debugln("Waiting for attach to finish", id, "...")
   973  	attachCh := make(chan error, 2)
   974  
   975  	// Copy any output to the build trace
   976  	go func() {
   977  		_, err := stdcopy.StdCopy(s.Trace, s.Trace, hijacked.Reader)
   978  		if err != nil {
   979  			attachCh <- err
   980  		}
   981  	}()
   982  
   983  	// Write the input to the container and close its STDIN to get it to finish
   984  	go func() {
   985  		_, err := io.Copy(hijacked.Conn, input)
   986  		hijacked.CloseWrite()
   987  		if err != nil {
   988  			attachCh <- err
   989  		}
   990  	}()
   991  
   992  	waitCh := make(chan error, 1)
   993  	go func() {
   994  		waitCh <- s.waitForContainer(id)
   995  	}()
   996  
   997  	select {
   998  	case <-ctx.Done():
   999  		s.killContainer(id, waitCh)
  1000  		err = errors.New("Aborted")
  1001  
  1002  	case err = <-attachCh:
  1003  		s.killContainer(id, waitCh)
  1004  		s.Debugln("Container", id, "finished with", err)
  1005  
  1006  	case err = <-waitCh:
  1007  		s.Debugln("Container", id, "finished with", err)
  1008  	}
  1009  	return
  1010  }
  1011  
  1012  func (s *executor) removeContainer(ctx context.Context, id string) error {
  1013  	s.disconnectNetwork(ctx, id)
  1014  	options := types.ContainerRemoveOptions{
  1015  		RemoveVolumes: true,
  1016  		Force:         true,
  1017  	}
  1018  	err := s.client.ContainerRemove(ctx, id, options)
  1019  	s.Debugln("Removed container", id, "with", err)
  1020  	return err
  1021  }
  1022  
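        // disconnectNetwork force-disconnects the container from every network it is
        // still attached to, cleaning up possibly zombie endpoints before the container
        // is killed or removed.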
  1023  func (s *executor) disconnectNetwork(ctx context.Context, id string) error {
  1024  	netList, err := s.client.NetworkList(ctx, types.NetworkListOptions{})
  1025  	if err != nil {
  1026  		s.Debugln("Can't get network list. ListNetworks exited with", err)
  1027  		return err
  1028  	}
  1029  
  1030  	for _, network := range netList {
  1031  		for _, pluggedContainer := range network.Containers {
  1032  			if id == pluggedContainer.Name {
  1033  				err = s.client.NetworkDisconnect(ctx, network.ID, id, true)
  1034  				if err != nil {
  1035  					s.Warningln("Can't disconnect possibly zombie container", pluggedContainer.Name, "from network", network.Name, "->", err)
  1036  				} else {
  1037  					s.Warningln("Possibly zombie container", pluggedContainer.Name, "is disconnected from network", network.Name)
  1038  				}
  1039  				break
  1040  			}
  1041  		}
  1042  	}
  1043  	return err
  1044  }
  1045  
  1046  func (s *executor) verifyAllowedImage(image, optionName string, allowedImages []string, internalImages []string) error {
  1047  	for _, allowedImage := range allowedImages {
  1048  		ok, _ := zglob.Match(allowedImage, image)
  1049  		if ok {
  1050  			return nil
  1051  		}
  1052  	}
  1053  
  1054  	for _, internalImage := range internalImages {
  1055  		if internalImage == image {
  1056  			return nil
  1057  		}
  1058  	}
  1059  
  1060  	if len(allowedImages) != 0 {
  1061  		s.Println()
  1062  		s.Errorln("The", image, "is not present on the list of allowed", optionName)
  1063  		for _, allowedImage := range allowedImages {
  1064  			s.Println("-", allowedImage)
  1065  		}
  1066  		s.Println()
  1067  	} else {
  1068  		// by default, allow overriding the image name
  1069  		return nil
  1070  	}
  1071  
  1072  	s.Println("Please check runner's configuration: http://doc.gitlab.com/ci/docker/using_docker_images.html#overwrite-image-and-services")
  1073  	return errors.New("invalid image")
  1074  }
  1075  
  1076  func (s *executor) expandImageName(imageName string, allowedInternalImages []string) (string, error) {
  1077  	if imageName != "" {
  1078  		image := s.Build.GetAllVariables().ExpandValue(imageName)
  1079  		allowedInternalImages = append(allowedInternalImages, s.Config.Docker.Image)
  1080  		err := s.verifyAllowedImage(image, "images", s.Config.Docker.AllowedImages, allowedInternalImages)
  1081  		if err != nil {
  1082  			return "", err
  1083  		}
  1084  		return image, nil
  1085  	}
  1086  
  1087  	if s.Config.Docker.Image == "" {
  1088  		return "", errors.New("No Docker image specified to run the build in")
  1089  	}
  1090  
  1091  	return s.Config.Docker.Image, nil
  1092  }
  1093  
  1094  func (s *executor) connectDocker() (err error) {
  1095  	client, err := docker_helpers.New(s.Config.Docker.DockerCredentials, DockerAPIVersion)
  1096  	if err != nil {
  1097  		return err
  1098  	}
  1099  	s.client = client
  1100  
  1101  	s.info, err = client.Info(s.Context)
  1102  	if err != nil {
  1103  		return err
  1104  	}
  1105  
  1106  	return
  1107  }
  1108  
  1109  func (s *executor) createDependencies() (err error) {
  1110  	err = s.bindDevices()
  1111  	if err != nil {
  1112  		return err
  1113  	}
  1114  
  1115  	s.SetCurrentStage(DockerExecutorStageCreatingBuildVolumes)
  1116  	s.Debugln("Creating build volume...")
  1117  	err = s.createBuildVolume()
  1118  	if err != nil {
  1119  		return err
  1120  	}
  1121  
  1122  	s.SetCurrentStage(DockerExecutorStageCreatingServices)
  1123  	s.Debugln("Creating services...")
  1124  	err = s.createServices()
  1125  	if err != nil {
  1126  		return err
  1127  	}
  1128  
  1129  	s.SetCurrentStage(DockerExecutorStageCreatingUserVolumes)
  1130  	s.Debugln("Creating user-defined volumes...")
  1131  	err = s.createUserVolumes()
  1132  	if err != nil {
  1133  		return err
  1134  	}
  1135  
  1136  	return
  1137  }
  1138  
  1139  func (s *executor) Prepare(options common.ExecutorPrepareOptions) error {
  1140  	err := s.prepareBuildsDir(options.Config)
  1141  	if err != nil {
  1142  		return err
  1143  	}
  1144  
  1145  	err = s.AbstractExecutor.Prepare(options)
  1146  	if err != nil {
  1147  		return err
  1148  	}
  1149  
  1150  	if s.BuildShell.PassFile {
  1151  		return errors.New("Docker doesn't support shells that require script file")
  1152  	}
  1153  
  1154  	if options.Config.Docker == nil {
  1155  		return errors.New("Missing docker configuration")
  1156  	}
  1157  
  1158  	s.SetCurrentStage(DockerExecutorStagePrepare)
  1159  	imageName, err := s.expandImageName(s.Build.Image.Name, []string{})
  1160  	if err != nil {
  1161  		return err
  1162  	}
  1163  
  1164  	s.Println("Using Docker executor with image", imageName, "...")
  1165  
  1166  	err = s.connectDocker()
  1167  	if err != nil {
  1168  		return err
  1169  	}
  1170  
  1171  	err = s.createDependencies()
  1172  	if err != nil {
  1173  		return err
  1174  	}
  1175  	return nil
  1176  }
  1177  
  1178  func (s *executor) prepareBuildsDir(config *common.RunnerConfig) error {
  1179  	rootDir := config.BuildsDir
  1180  	if rootDir == "" {
  1181  		rootDir = s.DefaultBuildsDir
  1182  	}
  1183  	if s.isHostMountedVolume(rootDir, config.Docker.Volumes...) {
  1184  		s.SharedBuildsDir = true
  1185  	}
  1186  	return nil
  1187  }
  1188  
  1189  func (s *executor) Cleanup() {
  1190  	s.SetCurrentStage(DockerExecutorStageCleanup)
  1191  
  1192  	var wg sync.WaitGroup
  1193  
  1194  	ctx, cancel := context.WithTimeout(context.Background(), dockerCleanupTimeout)
  1195  	defer cancel()
  1196  
  1197  	remove := func(id string) {
  1198  		wg.Add(1)
  1199  		go func() {
  1200  			s.removeContainer(ctx, id)
  1201  			wg.Done()
  1202  		}()
  1203  	}
  1204  
  1205  	for _, temporaryID := range s.temporary {
  1206  		remove(temporaryID)
  1207  	}
  1208  
  1209  	wg.Wait()
  1210  
  1211  	if s.client != nil {
  1212  		s.client.Close()
  1213  	}
  1214  
  1215  	s.AbstractExecutor.Cleanup()
  1216  }
  1217  
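        // runServiceHealthCheckContainer starts a helper container linked to the service
        // and waits up to timeout for its health check to finish.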
  1218  func (s *executor) runServiceHealthCheckContainer(service *types.Container, timeout time.Duration) error {
  1219  	waitImage, err := s.getPrebuiltImage()
  1220  	if err != nil {
  1221  		return err
  1222  	}
  1223  
  1224  	containerName := service.Names[0] + "-wait-for-service"
  1225  
  1226  	config := &container.Config{
  1227  		Cmd:    []string{"gitlab-runner-service"},
  1228  		Image:  waitImage.ID,
  1229  		Labels: s.getLabels("wait", "wait="+service.ID),
  1230  	}
  1231  	hostConfig := &container.HostConfig{
  1232  		RestartPolicy: neverRestartPolicy,
  1233  		Links:         []string{service.Names[0] + ":" + service.Names[0]},
  1234  		NetworkMode:   container.NetworkMode(s.Config.Docker.NetworkMode),
  1235  		LogConfig: container.LogConfig{
  1236  			Type: "json-file",
  1237  		},
  1238  	}
  1239  	s.Debugln("Waiting for service container", containerName, "to be up and running...")
  1240  	resp, err := s.client.ContainerCreate(s.Context, config, hostConfig, nil, containerName)
  1241  	if err != nil {
  1242  		return err
  1243  	}
  1244  	defer s.removeContainer(s.Context, resp.ID)
  1245  	err = s.client.ContainerStart(s.Context, resp.ID, types.ContainerStartOptions{})
  1246  	if err != nil {
  1247  		return err
  1248  	}
  1249  
  1250  	waitResult := make(chan error, 1)
  1251  	go func() {
  1252  		waitResult <- s.waitForContainer(resp.ID)
  1253  	}()
  1254  
  1255  	// these are warnings and they don't make the build fail
  1256  	select {
  1257  	case err := <-waitResult:
  1258  		return err
  1259  	case <-time.After(timeout):
  1260  		return fmt.Errorf("service %v timed out", containerName)
  1261  	}
  1262  }
  1263  
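        // waitForServiceContainer runs the health check for a service and, if it fails,
        // writes a warning together with the service's logs to the build trace.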
  1264  func (s *executor) waitForServiceContainer(service *types.Container, timeout time.Duration) error {
  1265  	err := s.runServiceHealthCheckContainer(service, timeout)
  1266  	if err == nil {
  1267  		return nil
  1268  	}
  1269  
  1270  	var buffer bytes.Buffer
  1271  	buffer.WriteString("\n")
  1272  	buffer.WriteString(helpers.ANSI_YELLOW + "*** WARNING:" + helpers.ANSI_RESET + " Service " + service.Names[0] + " probably didn't start properly.\n")
  1273  	buffer.WriteString("\n")
  1274  	buffer.WriteString(strings.TrimSpace(err.Error()) + "\n")
  1275  
  1276  	var containerBuffer bytes.Buffer
  1277  
  1278  	options := types.ContainerLogsOptions{
  1279  		ShowStdout: true,
  1280  		ShowStderr: true,
  1281  		Timestamps: true,
  1282  	}
  1283  
  1284  	hijacked, err := s.client.ContainerLogs(s.Context, service.ID, options)
  1285  	if err == nil {
  1286  		defer hijacked.Close()
  1287  		stdcopy.StdCopy(&containerBuffer, &containerBuffer, hijacked)
  1288  		if containerLog := containerBuffer.String(); containerLog != "" {
  1289  			buffer.WriteString("\n")
  1290  			buffer.WriteString(strings.TrimSpace(containerLog))
  1291  			buffer.WriteString("\n")
  1292  		}
  1293  	} else {
  1294  		buffer.WriteString(strings.TrimSpace(err.Error()) + "\n")
  1295  	}
  1296  
  1297  	buffer.WriteString("\n")
  1298  	buffer.WriteString(helpers.ANSI_YELLOW + "*********" + helpers.ANSI_RESET + "\n")
  1299  	buffer.WriteString("\n")
  1300  	io.Copy(s.Trace, &buffer)
  1301  	return err
  1302  }