github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/daemon/cluster/executor/container/adapter.go

     1  package container // import "github.com/docker/docker/daemon/cluster/executor/container"
     2  
     3  import (
     4  	"context"
     5  	"encoding/base64"
     6  	"encoding/json"
     7  	"fmt"
     8  	"io"
     9  	"os"
    10  	"strings"
    11  	"syscall"
    12  	"time"
    13  
    14  	"github.com/docker/distribution/reference"
    15  	"github.com/docker/docker/api/types"
    16  	"github.com/docker/docker/api/types/backend"
    17  	containertypes "github.com/docker/docker/api/types/container"
    18  	"github.com/docker/docker/api/types/events"
    19  	imagetypes "github.com/docker/docker/api/types/image"
    20  	"github.com/docker/docker/api/types/registry"
    21  	containerpkg "github.com/docker/docker/container"
    22  	"github.com/docker/docker/daemon"
    23  	"github.com/docker/docker/daemon/cluster/convert"
    24  	executorpkg "github.com/docker/docker/daemon/cluster/executor"
    25  	"github.com/docker/docker/libnetwork"
    26  	volumeopts "github.com/docker/docker/volume/service/opts"
    27  	gogotypes "github.com/gogo/protobuf/types"
    28  	"github.com/moby/swarmkit/v2/agent/exec"
    29  	"github.com/moby/swarmkit/v2/api"
    30  	"github.com/moby/swarmkit/v2/log"
    31  	"github.com/opencontainers/go-digest"
    32  	"github.com/pkg/errors"
    33  	"github.com/sirupsen/logrus"
    34  	"golang.org/x/time/rate"
    35  )
    36  
    38  // nodeAttachmentReadyInterval is the interval at which to poll for node network attachments to become ready.
    38  const nodeAttachmentReadyInterval = 100 * time.Millisecond
    39  
    40  // containerAdapter conducts remote operations for a container. Most calls
    41  // are naked calls to the client API, seeded with information from
    42  // containerConfig.
    43  type containerAdapter struct {
    44  	backend       executorpkg.Backend
    45  	imageBackend  executorpkg.ImageBackend
    46  	volumeBackend executorpkg.VolumeBackend
    47  	container     *containerConfig
    48  	dependencies  exec.DependencyGetter
    49  }
    50  
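        // newContainerAdapter builds a containerAdapter for the given task and node,
        // wiring in the container, image, and volume backends along with the task's
        // dependency getter.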
    51  func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
    52  	ctnr, err := newContainerConfig(task, node)
    53  	if err != nil {
    54  		return nil, err
    55  	}
    56  
    57  	return &containerAdapter{
    58  		container:     ctnr,
    59  		backend:       b,
    60  		imageBackend:  i,
    61  		volumeBackend: v,
    62  		dependencies:  dependencies,
    63  	}, nil
    64  }
    65  
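        // pullImage pulls the image for the task's container. The pull is skipped
        // when the image is referenced by ID, or by digest and already present
        // locally. Pull progress is decoded from the JSON stream and logged at a
        // limited rate.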
    66  func (c *containerAdapter) pullImage(ctx context.Context) error {
    67  	spec := c.container.spec()
    68  
    69  	// Skip pulling if the image is referenced by image ID.
    70  	if _, err := digest.Parse(spec.Image); err == nil {
    71  		return nil
    72  	}
    73  
    74  	// Skip pulling if the image is referenced by digest and already
    75  	// exists locally.
    76  	named, err := reference.ParseNormalizedNamed(spec.Image)
    77  	if err == nil {
    78  		if _, ok := named.(reference.Canonical); ok {
    79  			_, err := c.imageBackend.GetImage(ctx, spec.Image, imagetypes.GetImageOpts{})
    80  			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
    81  				return err
    82  			}
    83  			if err == nil {
    84  				return nil
    85  			}
    86  		}
    87  	}
    88  
    89  	// if the image needs to be pulled, the auth config will be retrieved and updated
    90  	var encodedAuthConfig string
    91  	if spec.PullOptions != nil {
    92  		encodedAuthConfig = spec.PullOptions.RegistryAuth
    93  	}
    94  
    95  	authConfig := &registry.AuthConfig{}
    96  	if encodedAuthConfig != "" {
    97  		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
    98  			logrus.Warnf("invalid authconfig: %v", err)
    99  		}
   100  	}
   101  
   102  	pr, pw := io.Pipe()
   103  	metaHeaders := map[string][]string{}
   104  	go func() {
   105  		// TODO LCOW Support: This will need revisiting as
   106  		// the stack is built up to include LCOW support for swarm.
   107  		err := c.imageBackend.PullImage(ctx, c.container.image(), "", nil, metaHeaders, authConfig, pw)
   108  		pw.CloseWithError(err)
   109  	}()
   110  
   111  	dec := json.NewDecoder(pr)
   112  	dec.UseNumber()
   113  	m := map[string]interface{}{}
   114  	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)
   115  
   116  	lastStatus := ""
   117  	for {
   118  		if err := dec.Decode(&m); err != nil {
   119  			if err == io.EOF {
   120  				break
   121  			}
   122  			return err
   123  		}
   124  		l := log.G(ctx)
   125  		// limit pull progress logs unless the status changes
   126  		if spamLimiter.Allow() || lastStatus != m["status"] {
   127  			// if we have progress details, we have everything we need
   128  			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
   129  				// first, log the image and status
   130  				l = l.WithFields(logrus.Fields{
   131  					"image":  c.container.image(),
   132  					"status": m["status"],
   133  				})
   134  				// then, if we have progress, log the progress
   135  				if progress["current"] != nil && progress["total"] != nil {
   136  					l = l.WithFields(logrus.Fields{
   137  						"current": progress["current"],
   138  						"total":   progress["total"],
   139  					})
   140  				}
   141  			}
   142  			l.Debug("pull in progress")
   143  		}
   144  		// sometimes, we get no useful information at all, and add no fields
   145  		if status, ok := m["status"].(string); ok {
   146  			lastStatus = status
   147  		}
   148  	}
   149  
   150  	// if the final stream object contained an error, return it
   151  	if errMsg, ok := m["error"]; ok {
   152  		return fmt.Errorf("%v", errMsg)
   153  	}
   154  	return nil
   155  }
   156  
   157  // waitNodeAttachments validates that NetworkAttachments exist on this node
   158  // for every network in use by this task. It blocks until the network
   159  // attachments are ready, or the context times out. If it returns nil, then the
   160  // node's network attachments are all there.
   161  func (c *containerAdapter) waitNodeAttachments(ctx context.Context) error {
   162  	// To do this, get the attachment store and try to look up the IP address
   163  	// for each network. If any network does not exist yet, we'll wait and
   164  	// try again.
   165  	attachmentStore := c.backend.GetAttachmentStore()
   166  	if attachmentStore == nil {
   167  		return fmt.Errorf("error getting attachment store")
   168  	}
   169  
   170  	// Essentially, we're polling here. This is sub-optimal, but a better
   171  	// solution based on signaling channels would require a more substantial
   172  	// rearchitecture and probably would not be worth our time in terms of
   173  	// performance gains.
   174  	poll := time.NewTicker(nodeAttachmentReadyInterval)
   175  	defer poll.Stop()
   176  	for {
   177  		// Start with ready set to true. If we try to get a network IP that
   178  		// doesn't exist yet, we will set this flag to false.
   179  		ready := true
   180  		for _, attachment := range c.container.networksAttachments {
   181  			// we only need node attachments (IP address) for overlay networks
   182  			// TODO(dperny): unsure if this will work with other network
   183  			// drivers, but i also don't think other network drivers use the
   184  			// node attachment IP address.
   185  			if attachment.Network.DriverState.Name == "overlay" {
   186  				if _, exists := attachmentStore.GetIPForNetwork(attachment.Network.ID); !exists {
   187  					ready = false
   188  				}
   189  			}
   190  		}
   191  
   192  		// if everything is ready here, then we can just return no error
   193  		if ready {
   194  			return nil
   195  		}
   196  
   197  		// otherwise, try polling again, or wait for context canceled.
   198  		select {
   199  		case <-ctx.Done():
   200  			return fmt.Errorf("node is missing network attachments, ip addresses may be exhausted")
   201  		case <-poll.C:
   202  		}
   203  	}
   204  }
   205  
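        // createNetworks creates the managed networks the task's container attaches
        // to, skipping networks that already exist or are predefined.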
   206  func (c *containerAdapter) createNetworks(ctx context.Context) error {
   207  	for name := range c.container.networksAttachments {
   208  		ncr, err := c.container.networkCreateRequest(name)
   209  		if err != nil {
   210  			return err
   211  		}
   212  
   213  		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
   214  			if _, ok := err.(libnetwork.NetworkNameError); ok {
   215  				continue
   216  			}
   217  			// Continue if CreateManagedNetwork returns a PredefinedNetworkError;
   218  			// other callers can still treat it as an error.
   219  			if _, ok := err.(daemon.PredefinedNetworkError); ok {
   220  				continue
   221  			}
   222  			return err
   223  		}
   224  	}
   225  
   226  	return nil
   227  }
   228  
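        // removeNetworks removes the managed networks used by the task's container.
        // Networks that still have active endpoints or that no longer exist are
        // skipped without error.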
   229  func (c *containerAdapter) removeNetworks(ctx context.Context) error {
   230  	var (
   231  		activeEndpointsError *libnetwork.ActiveEndpointsError
   232  		errNoSuchNetwork     libnetwork.ErrNoSuchNetwork
   233  	)
   234  
   235  	for name, v := range c.container.networksAttachments {
   236  		if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil {
   237  			switch {
   238  			case errors.As(err, &activeEndpointsError):
   239  				continue
   240  			case errors.As(err, &errNoSuchNetwork):
   241  				continue
   242  			default:
   243  				log.G(ctx).Errorf("network %s remove failed: %v", name, err)
   244  				return err
   245  			}
   246  		}
   247  	}
   248  
   249  	return nil
   250  }
   251  
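        // networkAttach updates the backend's attachment record for the first
        // network in the container's networking config.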
   252  func (c *containerAdapter) networkAttach(ctx context.Context) error {
   253  	config := c.container.createNetworkingConfig(c.backend)
   254  
   255  	var (
   256  		networkName string
   257  		networkID   string
   258  	)
   259  
   260  	if config != nil {
   261  		for n, epConfig := range config.EndpointsConfig {
   262  			networkName = n
   263  			networkID = epConfig.NetworkID
   264  			break
   265  		}
   266  	}
   267  
   268  	return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
   269  }
   270  
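        // waitForDetach blocks until the backend reports that the task's container
        // has detached from the first network in its networking config, or until
        // the context is canceled.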
   271  func (c *containerAdapter) waitForDetach(ctx context.Context) error {
   272  	config := c.container.createNetworkingConfig(c.backend)
   273  
   274  	var (
   275  		networkName string
   276  		networkID   string
   277  	)
   278  
   279  	if config != nil {
   280  		for n, epConfig := range config.EndpointsConfig {
   281  			networkName = n
   282  			networkID = epConfig.NetworkID
   283  			break
   284  		}
   285  	}
   286  
   287  	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
   288  }
   289  
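        // create creates the managed container, connects it to any additional
        // networks, and registers its dependency store, secret, config, and
        // service-config references with the backend.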
   290  func (c *containerAdapter) create(ctx context.Context) error {
   291  	var cr containertypes.CreateResponse
   292  	var err error
   293  	if cr, err = c.backend.CreateManagedContainer(ctx, types.ContainerCreateConfig{
   294  		Name:       c.container.name(),
   295  		Config:     c.container.config(),
   296  		HostConfig: c.container.hostConfig(c.dependencies.Volumes()),
   297  		// Use the first network in container create
   298  		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
   299  	}); err != nil {
   300  		return err
   301  	}
   302  
   303  	// The Docker daemon currently doesn't support multiple networks in container
   304  	// create, so connect the container to the remaining networks here.
   305  	nc := c.container.connectNetworkingConfig(c.backend)
   306  
   307  	if nc != nil {
   308  		for n, ep := range nc.EndpointsConfig {
   309  			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
   310  				return err
   311  			}
   312  		}
   313  	}
   314  
   315  	container := c.container.task.Spec.GetContainer()
   316  	if container == nil {
   317  		return errors.New("unable to get container from task spec")
   318  	}
   319  
   320  	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
   321  		return err
   322  	}
   323  
   324  	// configure secrets
   325  	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
   326  	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
   327  		return err
   328  	}
   329  
   330  	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
   331  	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
   332  		return err
   333  	}
   334  
   335  	return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig())
   336  }
   337  
   338  // checkMounts ensures that the provided mounts won't have any host-specific
   339  // problems at startup. For example, we disallow bind mounts whose source path
   340  // does not exist, which is slightly different from the container API.
   341  func (c *containerAdapter) checkMounts() error {
   342  	spec := c.container.spec()
   343  	for _, mount := range spec.Mounts {
   344  		switch mount.Type {
   345  		case api.MountTypeBind:
   346  			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
   347  				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
   348  			}
   349  		}
   350  	}
   351  
   352  	return nil
   353  }
   354  
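        // start validates the task's mounts and then starts the container.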
   355  func (c *containerAdapter) start(ctx context.Context) error {
   356  	if err := c.checkMounts(); err != nil {
   357  		return err
   358  	}
   359  
   360  	return c.backend.ContainerStart(ctx, c.container.name(), nil, "", "")
   361  }
   362  
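        // inspect returns the current state of the container, preferring the
        // context error if the context has been canceled.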
   363  func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
   364  	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
   365  	if ctx.Err() != nil {
   366  		return types.ContainerJSON{}, ctx.Err()
   367  	}
   368  	if err != nil {
   369  		return types.ContainerJSON{}, err
   370  	}
   371  	return *cs, nil
   372  }
   373  
   374  // events issues a call to the events API and returns a channel with all
   375  // events. The stream of events can be shut down by cancelling the context.
   376  func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
   377  	log.G(ctx).Debugf("waiting on events")
   378  	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
   379  	eventsq := make(chan events.Message, len(buffer))
   380  
   381  	for _, event := range buffer {
   382  		eventsq <- event
   383  	}
   384  
   385  	go func() {
   386  		defer c.backend.UnsubscribeFromEvents(l)
   387  
   388  		for {
   389  			select {
   390  			case ev := <-l:
   391  				jev, ok := ev.(events.Message)
   392  				if !ok {
   393  					log.G(ctx).Warnf("unexpected event message: %q", ev)
   394  					continue
   395  				}
   396  				select {
   397  				case eventsq <- jev:
   398  				case <-ctx.Done():
   399  					return
   400  				}
   401  			case <-ctx.Done():
   402  				return
   403  			}
   404  		}
   405  	}()
   406  
   407  	return eventsq
   408  }
   409  
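        // wait returns a channel that receives the container's state once it is no
        // longer running.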
   410  func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
   411  	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
   412  }
   413  
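        // shutdown stops the container, using the task's stop grace period as the
        // timeout when one is set.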
   414  func (c *containerAdapter) shutdown(ctx context.Context) error {
   415  	var options = containertypes.StopOptions{}
   416  	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
   417  	if spec := c.container.spec(); spec.StopGracePeriod != nil {
   418  		timeout := int(spec.StopGracePeriod.Seconds)
   419  		options.Timeout = &timeout
   420  	}
   421  	return c.backend.ContainerStop(ctx, c.container.name(), options)
   422  }
   423  
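        // terminate forcibly kills the container with SIGKILL.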
   424  func (c *containerAdapter) terminate(ctx context.Context) error {
   425  	return c.backend.ContainerKill(c.container.name(), syscall.SIGKILL.String())
   426  }
   427  
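        // remove force-removes the container and requests removal of its volumes.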
   428  func (c *containerAdapter) remove(ctx context.Context) error {
   429  	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
   430  		RemoveVolume: true,
   431  		ForceRemove:  true,
   432  	})
   433  }
   434  
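        // createVolumes creates the volumes that are embedded in the task's mounts
        // and carry a driver configuration.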
   435  func (c *containerAdapter) createVolumes(ctx context.Context) error {
   436  	// Create plugin volumes that are embedded inside a Mount
   437  	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
   438  		mount := mount
   439  		if mount.Type != api.MountTypeVolume {
   440  			continue
   441  		}
   442  
   443  		if mount.VolumeOptions == nil {
   444  			continue
   445  		}
   446  
   447  		if mount.VolumeOptions.DriverConfig == nil {
   448  			continue
   449  		}
   450  
   451  		req := c.container.volumeCreateRequest(&mount)
   452  
   453  		// Check if this volume exists on the engine
   454  		if _, err := c.volumeBackend.Create(ctx, req.Name, req.Driver,
   455  			volumeopts.WithCreateOptions(req.DriverOpts),
   456  			volumeopts.WithCreateLabels(req.Labels),
   457  		); err != nil {
   458  			// TODO(amitshukla): Today, volume create through the engine api does not return an error
   459  			// when the named volume with the same parameters already exists.
   460  			// It returns an error if the driver name is different - that is a valid error
   461  			return err
   462  		}
   463  	}
   464  
   465  	return nil
   466  }
   467  
   468  // waitClusterVolumes blocks until the VolumeGetter returns a path for each
   469  // cluster volume in use by this task
   470  func (c *containerAdapter) waitClusterVolumes(ctx context.Context) error {
   471  	for _, attached := range c.container.task.Volumes {
   472  		// for every attachment, try until we succeed or until the context
   473  		// is canceled.
   474  		for {
   475  			select {
   476  			case <-ctx.Done():
   477  				return ctx.Err()
   478  			default:
   479  				// continue through the code.
   480  			}
   481  			path, err := c.dependencies.Volumes().Get(attached.ID)
   482  			if err == nil && path != "" {
   483  				// break out of the inner-most loop
   484  				break
   485  			}
   486  		}
   487  	}
   488  	log.G(ctx).Debug("volumes ready")
   489  	return nil
   490  }
   491  
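        // activateServiceBinding asks the backend to activate the container's
        // service binding.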
   492  func (c *containerAdapter) activateServiceBinding() error {
   493  	return c.backend.ActivateContainerServiceBinding(c.container.name())
   494  }
   495  
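        // deactivateServiceBinding asks the backend to deactivate the container's
        // service binding.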
   496  func (c *containerAdapter) deactivateServiceBinding() error {
   497  	return c.backend.DeactivateContainerServiceBinding(c.container.name())
   498  }
   499  
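        // logs translates the swarm log subscription options into container logs
        // options and returns a channel of log messages from the backend.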
   500  func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
   501  	apiOptions := &types.ContainerLogsOptions{
   502  		Follow: options.Follow,
   503  
   504  		// Always say yes to Timestamps and Details. We make the decision
   505  		// of whether to return these to the user or not much higher up the
   506  		// stack.
   507  		Timestamps: true,
   508  		Details:    true,
   509  	}
   510  
   511  	if options.Since != nil {
   512  		since, err := gogotypes.TimestampFromProto(options.Since)
   513  		if err != nil {
   514  			return nil, err
   515  		}
   516  		// print since as this formatted string because the docker container
   517  		// logs interface expects it like this.
   518  		// see github.com/docker/docker/api/types/time.ParseTimestamps
   519  		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
   520  	}
   521  
   522  	if options.Tail < 0 {
   523  		// See protobuf documentation for details of how this works.
   524  		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
   525  	} else if options.Tail > 0 {
   526  		return nil, errors.New("tail relative to start of logs not supported via docker API")
   527  	}
   528  
   529  	if len(options.Streams) == 0 {
   530  		// empty == all
   531  		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
   532  	} else {
   533  		for _, stream := range options.Streams {
   534  			switch stream {
   535  			case api.LogStreamStdout:
   536  				apiOptions.ShowStdout = true
   537  			case api.LogStreamStderr:
   538  				apiOptions.ShowStderr = true
   539  			}
   540  		}
   541  	}
   542  	msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
   543  	if err != nil {
   544  		return nil, err
   545  	}
   546  	return msgs, nil
   547  }
   548  
   549  // todo: typed/wrapped errors
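        // isContainerCreateNameConflict reports whether err indicates that a
        // container with the same name already exists.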
   550  func isContainerCreateNameConflict(err error) bool {
   551  	return strings.Contains(err.Error(), "Conflict. The name")
   552  }
   553  
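        // isUnknownContainer reports whether err indicates that the container does
        // not exist.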
   554  func isUnknownContainer(err error) bool {
   555  	return strings.Contains(err.Error(), "No such container:")
   556  }
   557  
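        // isStoppedContainer reports whether err indicates that the container is
        // already stopped.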
   558  func isStoppedContainer(err error) bool {
   559  	return strings.Contains(err.Error(), "is already stopped")
   560  }
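
        // Usage sketch (illustrative only, not part of the original file): assuming a
        // caller already holds the backends, task, node, and dependency getter
        // (b, i, v, task, node, and deps below are placeholders), a controller
        // drives the adapter roughly in this order:
        //
        //	adapter, err := newContainerAdapter(b, i, v, task, node, deps)
        //	if err != nil {
        //		return err
        //	}
        //	if err := adapter.pullImage(ctx); err != nil {
        //		return err
        //	}
        //	if err := adapter.createNetworks(ctx); err != nil {
        //		return err
        //	}
        //	if err := adapter.createVolumes(ctx); err != nil {
        //		return err
        //	}
        //	if err := adapter.create(ctx); err != nil {
        //		return err
        //	}
        //	return adapter.start(ctx)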