github.com/pf-qiu/concourse/v6@v6.7.3-0.20201207032516-1f455d73275f/atc/worker/worker.go

// Package worker will eventually evolve into a concrete implementation of a runtime.
// As such, Concourse core shouldn't depend on abstractions defined in this package or its child packages.
// General runtime abstractions will be ported over to the runtime package.
// The Client interface is the main interface consumed by Concourse core; it will be shifted to the runtime package.
package worker

import (
	"context"
	"errors"
	"fmt"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"code.cloudfoundry.org/garden"
	"code.cloudfoundry.org/lager"
	"github.com/concourse/baggageclaim"
	"github.com/cppforlife/go-semi-semantic/version"
	"golang.org/x/sync/errgroup"

	"github.com/pf-qiu/concourse/v6/atc"
	"github.com/pf-qiu/concourse/v6/atc/db"
	"github.com/pf-qiu/concourse/v6/atc/metric"
	"github.com/pf-qiu/concourse/v6/atc/resource"
	"github.com/pf-qiu/concourse/v6/atc/runtime"
	"github.com/pf-qiu/concourse/v6/atc/worker/gclient"
	"github.com/pf-qiu/concourse/v6/tracing"
)

const userPropertyName = "user"

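// ResourceConfigCheckSessionExpiredError is returned by FindOrCreateContainer
// when the database owner recorded for a resource check container has
// disappeared, meaning the resource config check session has expired.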
var ResourceConfigCheckSessionExpiredError = errors.New("no db container was found for owner")

//go:generate counterfeiter . Worker

type Worker interface {
	BuildContainers() int

	Description() string
	Name() string
	ResourceTypes() []atc.WorkerResourceType
	Tags() atc.Tags
	Uptime() time.Duration
	IsOwnedByTeam() bool
	Ephemeral() bool
	IsVersionCompatible(lager.Logger, version.Version) bool
	Satisfies(lager.Logger, WorkerSpec) bool
	FindContainerByHandle(lager.Logger, int, string) (Container, bool, error)

	FindOrCreateContainer(
		context.Context,
		lager.Logger,
		db.ContainerOwner,
		db.ContainerMetadata,
		ContainerSpec,
	) (Container, error)

	FindVolumeForResourceCache(logger lager.Logger, resourceCache db.UsedResourceCache) (Volume, bool, error)
	FindResourceCacheForVolume(volume Volume) (db.UsedResourceCache, bool, error)
	FindVolumeForTaskCache(lager.Logger, int, int, string, string) (Volume, bool, error)
	Fetch(
		context.Context,
		lager.Logger,
		db.ContainerMetadata,
		Worker,
		ContainerSpec,
		runtime.ProcessSpec,
		resource.Resource,
		db.ContainerOwner,
		db.UsedResourceCache,
		string,
	) (GetResult, Volume, error)

	CertsVolume(lager.Logger) (volume Volume, found bool, err error)
	LookupVolume(lager.Logger, string) (Volume, bool, error)
	CreateVolume(logger lager.Logger, spec VolumeSpec, teamID int, volumeType db.VolumeType) (Volume, error)

	GardenClient() gclient.Client
	ActiveTasks() (int, error)
	IncreaseActiveTasks() error
	DecreaseActiveTasks() error
}
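
// A minimal sketch of how Concourse core might consume this interface when
// placing a step on a worker (the owner, metadata, and spec values here are
// illustrative, not taken from this file):
//
//	if !w.Satisfies(logger, workerSpec) {
//		return errNoSuitableWorker // hypothetical error
//	}
//	container, err := w.FindOrCreateContainer(ctx, logger, owner, metadata, containerSpec)
//	if err != nil {
//		return err
//	}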

type gardenWorker struct {
	gardenClient         gclient.Client
	volumeClient         VolumeClient
	imageFactory         ImageFactory
	resourceCacheFactory db.ResourceCacheFactory
	Fetcher
	dbWorker        db.Worker
	buildContainers int
	helper          workerHelper
}

// NewGardenWorker constructs a Worker using the gardenWorker runtime implementation, allowing container and volume
// creation on a specific Garden worker.
// A Garden worker is composed of a db.Worker, a Garden client, a container provider, and a volume client.
func NewGardenWorker(
	gardenClient gclient.Client,
	volumeRepository db.VolumeRepository,
	volumeClient VolumeClient,
	imageFactory ImageFactory,
	fetcher Fetcher,
	dbTeamFactory db.TeamFactory,
	dbWorker db.Worker,
	resourceCacheFactory db.ResourceCacheFactory,
	// TODO: numBuildContainers is only needed for the placement strategy, but
	// this constructor is also called from ContainerProvider.FindOrCreateContainer,
	// so 0 values are passed in for numBuildContainers everywhere.
	numBuildContainers int,
) Worker {
	workerHelper := workerHelper{
		gardenClient:  gardenClient,
		volumeClient:  volumeClient,
		volumeRepo:    volumeRepository,
		dbTeamFactory: dbTeamFactory,
		dbWorker:      dbWorker,
	}

	return &gardenWorker{
		gardenClient:         gardenClient,
		volumeClient:         volumeClient,
		imageFactory:         imageFactory,
		Fetcher:              fetcher,
		dbWorker:             dbWorker,
		resourceCacheFactory: resourceCacheFactory,
		buildContainers:      numBuildContainers,
		helper:               workerHelper,
	}
}

func (worker *gardenWorker) GardenClient() gclient.Client {
	return worker.gardenClient
}

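// IsVersionCompatible reports whether this worker can serve the wanted
// version: the release versions must match exactly, or the worker must be
// newer within the same major version line. For example, a worker at 2.3 is
// compatible with a wanted version of 2.1, while workers at 2.0 or 3.0 are not.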
func (worker *gardenWorker) IsVersionCompatible(logger lager.Logger, comparedVersion version.Version) bool {
	workerVersion := worker.dbWorker.Version()
	logger = logger.Session("check-version", lager.Data{
		"want-worker-version": comparedVersion.String(),
		"have-worker-version": workerVersion,
	})

	if workerVersion == nil {
		logger.Info("empty-worker-version")
		return false
	}

	v, err := version.NewVersionFromString(*workerVersion)
	if err != nil {
		logger.Error("failed-to-parse-version", err)
		return false
	}

	switch v.Release.Compare(comparedVersion.Release) {
	case 0:
		return true
	case -1:
		return false
	default:
		if v.Release.Components[0].Compare(comparedVersion.Release.Components[0]) == 0 {
			return true
		}

		return false
	}
}

func (worker *gardenWorker) FindResourceTypeByPath(path string) (atc.WorkerResourceType, bool) {
	for _, rt := range worker.dbWorker.ResourceTypes() {
		if path == rt.Image {
			return rt, true
		}
	}

	return atc.WorkerResourceType{}, false
}

func (worker *gardenWorker) FindVolumeForResourceCache(logger lager.Logger, resourceCache db.UsedResourceCache) (Volume, bool, error) {
	return worker.volumeClient.FindVolumeForResourceCache(logger, resourceCache)
}

func (worker *gardenWorker) FindResourceCacheForVolume(volume Volume) (db.UsedResourceCache, bool, error) {
	if volume.GetResourceCacheID() == 0 {
		return nil, false, nil
	}

	return worker.resourceCacheFactory.FindResourceCacheByID(volume.GetResourceCacheID())
}

func (worker *gardenWorker) FindVolumeForTaskCache(logger lager.Logger, teamID int, jobID int, stepName string, path string) (Volume, bool, error) {
	return worker.volumeClient.FindVolumeForTaskCache(logger, teamID, jobID, stepName, path)
}

func (worker *gardenWorker) CertsVolume(logger lager.Logger) (Volume, bool, error) {
	return worker.volumeClient.FindOrCreateVolumeForResourceCerts(logger.Session("find-or-create"))
}

func (worker *gardenWorker) CreateVolume(logger lager.Logger, spec VolumeSpec, teamID int, volumeType db.VolumeType) (Volume, error) {
	return worker.volumeClient.CreateVolume(logger.Session("find-or-create"), spec, teamID, worker.dbWorker.Name(), volumeType)
}

func (worker *gardenWorker) LookupVolume(logger lager.Logger, handle string) (Volume, bool, error) {
	return worker.volumeClient.LookupVolume(logger, handle)
}

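// FindOrCreateContainer delegates to findOrCreateContainer, annotating any
// error with the worker name via %w so callers can still match the underlying
// error with errors.Is or errors.As.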
func (worker *gardenWorker) FindOrCreateContainer(
	ctx context.Context,
	logger lager.Logger,
	owner db.ContainerOwner,
	metadata db.ContainerMetadata,
	containerSpec ContainerSpec,
) (Container, error) {
	c, err := worker.findOrCreateContainer(ctx, logger, owner, metadata, containerSpec)
	if err != nil {
		return c, fmt.Errorf("find or create container on worker %s: %w", worker.Name(), err)
	}
	return c, err
}

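// findOrCreateContainer reconciles container state between the database and
// Garden:
//
//  1. find an existing creating/created container in the db, or insert a new
//     "creating" record;
//  2. look up the Garden container by handle;
//  3. if the db record is already created, the Garden container must exist;
//     otherwise fetch the image, build volumes and bind mounts, and create the
//     Garden container if it is missing;
//  4. transition the db record from creating to created.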
func (worker *gardenWorker) findOrCreateContainer(
	ctx context.Context,
	logger lager.Logger,
	owner db.ContainerOwner,
	metadata db.ContainerMetadata,
	containerSpec ContainerSpec,
) (Container, error) {
	var (
		gardenContainer   gclient.Container
		createdContainer  db.CreatedContainer
		creatingContainer db.CreatingContainer
		containerHandle   string
		err               error
	)

	// ensure either creatingContainer or createdContainer exists
	creatingContainer, createdContainer, err = worker.dbWorker.FindContainer(owner)
	if err != nil {
		return nil, err
	}

	if creatingContainer != nil {
		containerHandle = creatingContainer.Handle()
	} else if createdContainer != nil {
		containerHandle = createdContainer.Handle()
	} else {
		logger.Debug("creating-container-in-db")
		creatingContainer, err = worker.dbWorker.CreateContainer(
			owner,
			metadata,
		)
		if err != nil {
			logger.Error("failed-to-create-container-in-db", err)
			if _, ok := err.(db.ContainerOwnerDisappearedError); ok {
				return nil, ResourceConfigCheckSessionExpiredError
			}

			return nil, fmt.Errorf("create container: %w", err)
		}
		logger.Debug("created-creating-container-in-db")
		containerHandle = creatingContainer.Handle()
	}

	logger = logger.WithData(lager.Data{"container": containerHandle})

	gardenContainer, err = worker.gardenClient.Lookup(containerHandle)
	if err != nil {
		if _, ok := err.(garden.ContainerNotFoundError); !ok {
			logger.Error("failed-to-lookup-creating-container-in-garden", err)
			return nil, err
		}
	}

	// if createdContainer exists, gardenContainer should also exist
	if createdContainer != nil {
		logger = logger.WithData(lager.Data{"container": containerHandle})
		logger.Debug("found-created-container-in-db")

		if gardenContainer == nil {
			return nil, garden.ContainerNotFoundError{Handle: containerHandle}
		}
		return worker.helper.constructGardenWorkerContainer(
			logger,
			createdContainer,
			gardenContainer,
		)
	}

	// we now have a creatingContainer. If a gardenContainer does not exist, we
	// will create one. If it does exist, we will transition the creatingContainer
	// to created and return a worker.Container
	if gardenContainer == nil {
		fetchedImage, err := worker.fetchImageForContainer(
			ctx,
			logger,
			containerSpec.ImageSpec,
			containerSpec.TeamID,
			creatingContainer,
		)
		if err != nil {
			creatingContainer.Failed()
			logger.Error("failed-to-fetch-image-for-container", err)
			return nil, err
		}

		volumeMounts, err := worker.createVolumes(ctx, logger, fetchedImage.Privileged, creatingContainer, containerSpec)
		if err != nil {
			creatingContainer.Failed()
			logger.Error("failed-to-create-volume-mounts-for-container", err)
			return nil, err
		}
		bindMounts, err := worker.getBindMounts(volumeMounts, containerSpec.BindMounts)
		if err != nil {
			creatingContainer.Failed()
			logger.Error("failed-to-create-bind-mounts-for-container", err)
			return nil, err
		}

		logger.Debug("creating-garden-container")

		gardenContainer, err = worker.helper.createGardenContainer(containerSpec, fetchedImage, creatingContainer.Handle(), bindMounts)
		if err != nil {
			_, failedErr := creatingContainer.Failed()
			if failedErr != nil {
				logger.Error("failed-to-mark-container-as-failed", failedErr)
			}
			metric.Metrics.FailedContainers.Inc()

			logger.Error("failed-to-create-container-in-garden", err)
			return nil, err
		}
	}

	logger.Debug("created-container-in-garden")

	metric.Metrics.ContainersCreated.Inc()
	createdContainer, err = creatingContainer.Created()
	if err != nil {
		logger.Error("failed-to-mark-container-as-created", err)

		_ = worker.gardenClient.Destroy(containerHandle)

		return nil, err
	}

	logger.Debug("created-container-in-db")

	return worker.helper.constructGardenWorkerContainer(
		logger,
		createdContainer,
		gardenContainer,
	)
}

func (worker *gardenWorker) getBindMounts(volumeMounts []VolumeMount, bindMountSources []BindMountSource) ([]garden.BindMount, error) {
	bindMounts := []garden.BindMount{}

	for _, mount := range bindMountSources {
		bindMount, found, mountErr := mount.VolumeOn(worker)
		if mountErr != nil {
			return nil, mountErr
		}
		if found {
			bindMounts = append(bindMounts, bindMount)
		}
	}

	for _, mount := range volumeMounts {
		bindMounts = append(bindMounts, garden.BindMount{
			SrcPath: mount.Volume.Path(),
			DstPath: mount.MountPath,
			Mode:    garden.BindMountModeRW,
		})
	}
	return bindMounts, nil
}

func (worker *gardenWorker) fetchImageForContainer(
	ctx context.Context,
	logger lager.Logger,
	spec ImageSpec,
	teamID int,
	creatingContainer db.CreatingContainer,
) (FetchedImage, error) {
	image, err := worker.imageFactory.GetImage(
		logger,
		worker,
		worker.volumeClient,
		spec,
		teamID,
	)
	if err != nil {
		return FetchedImage{}, err
	}

	logger.Debug("fetching-image")
	return image.FetchForContainer(ctx, logger, creatingContainer)
}

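// mountableLocalInput is an input whose artifact already has a volume on this
// worker; it is mounted as a copy-on-write child of that volume.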
type mountableLocalInput struct {
	desiredCOWParent Volume
	desiredMountPath string
}

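// mountableRemoteInput is an input whose artifact lives elsewhere; its
// contents are streamed into a fresh volume on this worker.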
type mountableRemoteInput struct {
	desiredArtifact  ArtifactSource
	desiredMountPath string
}

// creates volumes required to run any step:
// * scratch
// * working dir
// * input
// * output
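//
// For example, a step with working directory /tmp/build, one input mounted at
// /tmp/build/repo, and one output at /tmp/build/built would produce mounts
// like (illustrative paths, not taken from this file):
//
//	/scratch          (empty volume)
//	/tmp/build        (empty volume for the working dir)
//	/tmp/build/repo   (COW or streamed input volume)
//	/tmp/build/built  (empty output volume)
//
// Input and output mounts are sorted by mount path before being returned.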
func (worker *gardenWorker) createVolumes(
	ctx context.Context,
	logger lager.Logger,
	isPrivileged bool,
	creatingContainer db.CreatingContainer,
	spec ContainerSpec,
) ([]VolumeMount, error) {
	var volumeMounts []VolumeMount
	var ioVolumeMounts []VolumeMount

	scratchVolume, err := worker.volumeClient.FindOrCreateVolumeForContainer(
		logger,
		VolumeSpec{
			Strategy:   baggageclaim.EmptyStrategy{},
			Privileged: isPrivileged,
		},
		creatingContainer,
		spec.TeamID,
		"/scratch",
	)
	if err != nil {
		return nil, err
	}

	scratchMount := VolumeMount{
		Volume:    scratchVolume,
		MountPath: "/scratch",
	}

	volumeMounts = append(volumeMounts, scratchMount)

	hasSpecDirInInputs := anyMountTo(spec.Dir, getDestinationPathsFromInputs(spec.Inputs))
	hasSpecDirInOutputs := anyMountTo(spec.Dir, getDestinationPathsFromOutputs(spec.Outputs))

	if spec.Dir != "" && !hasSpecDirInOutputs && !hasSpecDirInInputs {
		workdirVolume, volumeErr := worker.volumeClient.FindOrCreateVolumeForContainer(
			logger,
			VolumeSpec{
				Strategy:   baggageclaim.EmptyStrategy{},
				Privileged: isPrivileged,
			},
			creatingContainer,
			spec.TeamID,
			spec.Dir,
		)
		if volumeErr != nil {
			return nil, volumeErr
		}

		volumeMounts = append(volumeMounts, VolumeMount{
			Volume:    workdirVolume,
			MountPath: spec.Dir,
		})
	}

	inputDestinationPaths := make(map[string]bool)

	localInputs := make([]mountableLocalInput, 0)
	nonlocalInputs := make([]mountableRemoteInput, 0)

	for _, inputSource := range spec.Inputs {
		inputSourceVolume, found, err := inputSource.Source().ExistsOn(logger, worker)
		if err != nil {
			return nil, err
		}
		cleanedInputPath := filepath.Clean(inputSource.DestinationPath())
		inputDestinationPaths[cleanedInputPath] = true

		if found {
			localInputs = append(localInputs, mountableLocalInput{
				desiredCOWParent: inputSourceVolume,
				desiredMountPath: cleanedInputPath,
			})
		} else {
			nonlocalInputs = append(nonlocalInputs, mountableRemoteInput{
				desiredArtifact:  inputSource.Source(),
				desiredMountPath: cleanedInputPath,
			})
		}
	}

	// we create COW volumes for task caches too, in case multiple builds
	// are running the same task
	cowMounts, err := worker.cloneLocalVolumes(
		logger,
		spec.TeamID,
		isPrivileged,
		creatingContainer,
		localInputs,
	)
	if err != nil {
		return nil, err
	}

	streamedMounts, err := worker.cloneRemoteVolumes(
		ctx,
		logger,
		spec.TeamID,
		isPrivileged,
		creatingContainer,
		nonlocalInputs,
	)
	if err != nil {
		return nil, err
	}

	ioVolumeMounts = append(ioVolumeMounts, cowMounts...)
	ioVolumeMounts = append(ioVolumeMounts, streamedMounts...)

	for _, outputPath := range spec.Outputs {
		cleanedOutputPath := filepath.Clean(outputPath)

		// reuse volume if output path is the same as input
		if inputDestinationPaths[cleanedOutputPath] {
			continue
		}

		outVolume, volumeErr := worker.volumeClient.FindOrCreateVolumeForContainer(
			logger,
			VolumeSpec{
				Strategy:   baggageclaim.EmptyStrategy{},
				Privileged: isPrivileged,
			},
			creatingContainer,
			spec.TeamID,
			cleanedOutputPath,
		)
		if volumeErr != nil {
			return nil, volumeErr
		}

		ioVolumeMounts = append(ioVolumeMounts, VolumeMount{
			Volume:    outVolume,
			MountPath: cleanedOutputPath,
		})
	}

	sort.Sort(byMountPath(ioVolumeMounts))

	volumeMounts = append(volumeMounts, ioVolumeMounts...)
	return volumeMounts, nil
}

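// cloneLocalVolumes mounts each local input as a copy-on-write child of its
// existing volume, so the container can write to the input without mutating
// the shared parent volume.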
func (worker *gardenWorker) cloneLocalVolumes(
	logger lager.Logger,
	teamID int,
	privileged bool,
	container db.CreatingContainer,
	locals []mountableLocalInput,
) ([]VolumeMount, error) {
	mounts := make([]VolumeMount, len(locals))

	for i, localInput := range locals {
		inputVolume, err := worker.volumeClient.FindOrCreateCOWVolumeForContainer(
			logger,
			VolumeSpec{
				Strategy:   localInput.desiredCOWParent.COWStrategy(),
				Privileged: privileged,
			},
			container,
			localInput.desiredCOWParent,
			teamID,
			localInput.desiredMountPath,
		)
		if err != nil {
			return nil, err
		}

		mounts[i] = VolumeMount{
			Volume:    inputVolume,
			MountPath: localInput.desiredMountPath,
		}
	}

	return mounts, nil
}

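// cloneRemoteVolumes creates an empty volume for each remote input and streams
// the artifact contents into it, one goroutine per input via errgroup. Each
// goroutine writes only its own mounts[i] slot, so no synchronization beyond
// g.Wait is needed.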
func (worker *gardenWorker) cloneRemoteVolumes(
	ctx context.Context,
	logger lager.Logger,
	teamID int,
	privileged bool,
	container db.CreatingContainer,
	nonLocals []mountableRemoteInput,
) ([]VolumeMount, error) {
	mounts := make([]VolumeMount, len(nonLocals))
	if len(nonLocals) == 0 {
		return mounts, nil
	}

	ctx, span := tracing.StartSpan(ctx, "worker.cloneRemoteVolumes", tracing.Attrs{"container_id": container.Handle()})
	defer span.End()

	g, groupCtx := errgroup.WithContext(ctx)

	for i, nonLocalInput := range nonLocals {
		// ensure each goroutine gets its own unchanging copy of the loop variables
		i, nonLocalInput := i, nonLocalInput
		inputVolume, err := worker.volumeClient.FindOrCreateVolumeForContainer(
			logger,
			VolumeSpec{
				Strategy:   baggageclaim.EmptyStrategy{},
				Privileged: privileged,
			},
			container,
			teamID,
			nonLocalInput.desiredMountPath,
		)
		if err != nil {
			return []VolumeMount{}, err
		}

		g.Go(func() error {
			if streamable, ok := nonLocalInput.desiredArtifact.(StreamableArtifactSource); ok {
				// scope the error to this goroutine rather than reusing the loop's err
				if err := streamable.StreamTo(groupCtx, inputVolume); err != nil {
					return err
				}
			}

			mounts[i] = VolumeMount{
				Volume:    inputVolume,
				MountPath: nonLocalInput.desiredMountPath,
			}

			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}

	logger.Debug("streamed-non-local-volumes", lager.Data{"volumes-streamed": len(nonLocals)})

	return mounts, nil
}

func (worker *gardenWorker) FindContainerByHandle(logger lager.Logger, teamID int, handle string) (Container, bool, error) {
	gardenContainer, err := worker.gardenClient.Lookup(handle)
	if err != nil {
		if _, ok := err.(garden.ContainerNotFoundError); ok {
			logger.Info("container-not-found")
			return nil, false, nil
		}

		logger.Error("failed-to-lookup-on-garden", err)
		return nil, false, err
	}

	createdContainer, found, err := worker.helper.dbTeamFactory.GetByID(teamID).FindCreatedContainerByHandle(handle)
	if err != nil {
		logger.Error("failed-to-lookup-in-db", err)
		return nil, false, err
	}
	if !found {
		return nil, false, nil
	}

	container, err := worker.helper.constructGardenWorkerContainer(logger, createdContainer, gardenContainer)
	if err != nil {
		logger.Error("failed-to-construct-container", err)
		return nil, false, err
	}

	return container, true, nil
}

func (worker *gardenWorker) Name() string {
	return worker.dbWorker.Name()
}

func (worker *gardenWorker) ResourceTypes() []atc.WorkerResourceType {
	return worker.dbWorker.ResourceTypes()
}

func (worker *gardenWorker) Tags() atc.Tags {
	return worker.dbWorker.Tags()
}

func (worker *gardenWorker) Ephemeral() bool {
	return worker.dbWorker.Ephemeral()
}

func (worker *gardenWorker) BuildContainers() int {
	return worker.buildContainers
}

func (worker *gardenWorker) Satisfies(logger lager.Logger, spec WorkerSpec) bool {
	workerTeamID := worker.dbWorker.TeamID()
	workerResourceTypes := worker.dbWorker.ResourceTypes()

	if spec.TeamID != workerTeamID && workerTeamID != 0 {
		return false
	}

	if spec.ResourceType != "" {
		matchedType := false
		for _, t := range workerResourceTypes {
			if t.Type == spec.ResourceType {
				matchedType = true
				break
			}
		}

		if !matchedType {
			return false
		}
	}

	if spec.Platform != "" {
		if spec.Platform != worker.dbWorker.Platform() {
			return false
		}
	}

	if !worker.tagsMatch(spec.Tags) {
		return false
	}

	return true
}

func (worker *gardenWorker) Description() string {
	messages := []string{
		fmt.Sprintf("platform '%s'", worker.dbWorker.Platform()),
	}

	for _, tag := range worker.dbWorker.Tags() {
		messages = append(messages, fmt.Sprintf("tag '%s'", tag))
	}

	return strings.Join(messages, ", ")
}

func (worker *gardenWorker) IsOwnedByTeam() bool {
	return worker.dbWorker.TeamID() != 0
}

func (worker *gardenWorker) Uptime() time.Duration {
	return time.Since(worker.dbWorker.StartTime())
}

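// tagsMatch reports whether the requested tags select this worker: a tagged
// worker requires the request to carry at least one tag, and every requested
// tag must be present on the worker. For example, a worker tagged
// ["gpu", "ssd"] matches a request for ["gpu"], but not an empty request or a
// request for ["gpu", "arm"].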
func (worker *gardenWorker) tagsMatch(tags []string) bool {
	workerTags := worker.dbWorker.Tags()
	if len(workerTags) > 0 && len(tags) == 0 {
		return false
	}

insert_coin:
	for _, stag := range tags {
		for _, wtag := range workerTags {
			if stag == wtag {
				continue insert_coin
			}
		}

		return false
	}

	return true
}

func (worker *gardenWorker) ActiveTasks() (int, error) {
	return worker.dbWorker.ActiveTasks()
}

func (worker *gardenWorker) IncreaseActiveTasks() error {
	return worker.dbWorker.IncreaseActiveTasks()
}

func (worker *gardenWorker) DecreaseActiveTasks() error {
	return worker.dbWorker.DecreaseActiveTasks()
}