github.com/rish1988/moby@v25.0.2+incompatible/daemon/cluster/executor/container/adapter.go

package container // import "github.com/docker/docker/daemon/cluster/executor/container"

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"syscall"
	"time"

	"github.com/containerd/log"
	"github.com/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/registry"
	containerpkg "github.com/docker/docker/container"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/docker/libnetwork"
	volumeopts "github.com/docker/docker/volume/service/opts"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/moby/swarmkit/v2/agent/exec"
	"github.com/moby/swarmkit/v2/api"
	swarmlog "github.com/moby/swarmkit/v2/log"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)

// nodeAttachmentReadyInterval is the interval at which waitNodeAttachments
// polls the attachment store for the node's network attachments to become ready.
const nodeAttachmentReadyInterval = 100 * time.Millisecond

// containerAdapter conducts remote operations for a container. Calls are
// mostly naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
	backend       executorpkg.Backend
	imageBackend  executorpkg.ImageBackend
	volumeBackend executorpkg.VolumeBackend
	container     *containerConfig
	dependencies  exec.DependencyGetter
}

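// newContainerAdapter builds a containerAdapter for the given task and node,
// constructing the containerConfig that seeds all subsequent operations.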
func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task, node)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container:     ctnr,
		backend:       b,
		imageBackend:  i,
		volumeBackend: v,
		dependencies:  dependencies,
	}, nil
}

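// pullImage pulls the image referenced by the task's container spec. The pull
// is skipped when the image is referenced by ID, or by digest and already
// present locally. Pull progress is decoded from the engine's JSON stream and
// logged at a rate-limited pace.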
func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// Skip pulling if the image is referenced by image ID.
	if _, err := digest.Parse(spec.Image); err == nil {
		return nil
	}

	// Skip pulling if the image is referenced by digest and already
	// exists locally.
	named, err := reference.ParseNormalizedNamed(spec.Image)
	if err == nil {
		if _, ok := named.(reference.Canonical); ok {
			_, err := c.imageBackend.GetImage(ctx, spec.Image, backend.GetImageOpts{})
			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				return err
			}
			if err == nil {
				return nil
			}
		}
	}

	// if the image needs to be pulled, the auth config will be retrieved and updated
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &registry.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			swarmlog.G(ctx).Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		// TODO LCOW Support: This will need revisiting as
		// the stack is built up to include LCOW support for swarm.

		// Make sure the image has a tag, otherwise it will pull all tags.
		ref := reference.TagNameOnly(named)
		err := c.imageBackend.PullImage(ctx, ref, nil, metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := swarmlog.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(log.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(log.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}

// waitNodeAttachments validates that NetworkAttachments exist on this node
// for every network in use by this task. It blocks until the network
// attachments are ready, or the context times out. If it returns nil, then the
// node's network attachments are all there.
func (c *containerAdapter) waitNodeAttachments(ctx context.Context) error {
	// to do this, we're going to get the attachment store and try getting the
	// IP address for each network. if any network comes back not existing,
	// we'll wait and try again.
	attachmentStore := c.backend.GetAttachmentStore()
	if attachmentStore == nil {
		return fmt.Errorf("error getting attachment store")
	}

	// essentially, we're long-polling here. this is really sub-optimal, but a
	// better solution based off signaling channels would require a more
	// substantial rearchitecture and probably not be worth our time in terms
	// of performance gains.
	poll := time.NewTicker(nodeAttachmentReadyInterval)
	defer poll.Stop()
	for {
		// set a flag ready to true. if we try to get a network IP that doesn't
		// exist yet, we will set this flag to "false"
		ready := true
		for _, attachment := range c.container.networksAttachments {
			// we only need node attachments (IP address) for overlay networks
			// TODO(dperny): unsure if this will work with other network
			// drivers, but i also don't think other network drivers use the
			// node attachment IP address.
			if attachment.Network.DriverState.Name == "overlay" {
				if _, exists := attachmentStore.GetIPForNetwork(attachment.Network.ID); !exists {
					ready = false
				}
			}
		}

		// if everything is ready here, then we can just return no error
		if ready {
			return nil
		}

		// otherwise, try polling again, or wait for context canceled.
		select {
		case <-ctx.Done():
			return fmt.Errorf("node is missing network attachments, ip addresses may be exhausted")
		case <-poll.C:
		}
	}
}

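// createNetworks creates the managed networks for each of the task's network
// attachments, skipping networks that already exist or are predefined.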
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for name := range c.container.networksAttachments {
		ncr, err := c.container.networkCreateRequest(name)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}
			// Continue if CreateManagedNetwork returns a PredefinedNetworkError;
			// other callers can still treat it as an error.
			if _, ok := err.(daemon.PredefinedNetworkError); ok {
				continue
			}
			return err
		}
	}

	return nil
}

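// removeNetworks deletes the managed networks attached to this task's
// container. Networks that still have active endpoints or that no longer
// exist are skipped.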
func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	var (
		activeEndpointsError *libnetwork.ActiveEndpointsError
		errNoSuchNetwork     libnetwork.ErrNoSuchNetwork
	)

	for name, v := range c.container.networksAttachments {
		if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil {
			switch {
			case errors.As(err, &activeEndpointsError):
				continue
			case errors.As(err, &errNoSuchNetwork):
				continue
			default:
				swarmlog.G(ctx).Errorf("network %s remove failed: %v", name, err)
				return err
			}
		}
	}

	return nil
}

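// networkAttach updates the attachment for the first network in the
// container's networking config.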
func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
}

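// waitForDetach blocks until the backend signals that the container has been
// detached from the first network in its networking config, or the context is
// canceled.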
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
}

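// create creates the managed container on the backend and wires up its
// dependency store, secret references, config references, and service config.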
func (c *containerAdapter) create(ctx context.Context) error {
	var cr containertypes.CreateResponse
	var err error
	if cr, err = c.backend.CreateManagedContainer(ctx, backend.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(c.dependencies.Volumes()),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
	}); err != nil {
		return err
	}

	container := c.container.task.Spec.GetContainer()
	if container == nil {
		return errors.New("unable to get container from task spec")
	}

	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
		return err
	}

	// configure secrets
	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
		return err
	}

	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
		return err
	}

	return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig())
}

// checkMounts ensures that the provided mounts won't have any host-specific
// problems at start up. For example, we disallow bind mounts without an
// existing path, which is slightly different from the container API.
func (c *containerAdapter) checkMounts() error {
	spec := c.container.spec()
	for _, mount := range spec.Mounts {
		switch mount.Type {
		case api.MountTypeBind:
			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
			}
		}
	}

	return nil
}

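// start validates the task's mounts and then starts the container.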
func (c *containerAdapter) start(ctx context.Context) error {
	if err := c.checkMounts(); err != nil {
		return err
	}

	return c.backend.ContainerStart(ctx, c.container.name(), nil, "", "")
}

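// inspect returns the current state of the container, preferring a context
// error if the context has been canceled.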
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	cs, err := c.backend.ContainerInspectCurrent(ctx, c.container.name(), false)
	if ctx.Err() != nil {
		return types.ContainerJSON{}, ctx.Err()
	}
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return *cs, nil
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	swarmlog.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					swarmlog.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}

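// wait returns a channel that fires when the container is no longer running.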
func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
}

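// shutdown stops the container, honoring the task's stop grace period when
// one is set.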
func (c *containerAdapter) shutdown(ctx context.Context) error {
	options := containertypes.StopOptions{}
	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
	if spec := c.container.spec(); spec.StopGracePeriod != nil {
		timeout := int(spec.StopGracePeriod.Seconds)
		options.Timeout = &timeout
	}
	return c.backend.ContainerStop(ctx, c.container.name(), options)
}

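// terminate kills the container with SIGKILL.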
func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), syscall.SIGKILL.String())
}

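// remove force-removes the container along with its volumes.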
func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &backend.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

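// createVolumes creates the volumes declared inline in the task's volume
// mounts that carry driver configuration ("plugin volumes").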
func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		mount := mount
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.volumeBackend.Create(ctx, req.Name, req.Driver,
			volumeopts.WithCreateOptions(req.DriverOpts),
			volumeopts.WithCreateLabels(req.Labels),
		); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}
	}

	return nil
}

// waitClusterVolumes blocks until the VolumeGetter returns a path for each
// cluster volume in use by this task
func (c *containerAdapter) waitClusterVolumes(ctx context.Context) error {
	for _, attached := range c.container.task.Volumes {
		// for every attachment, try until we succeed or until the context
		// is canceled.
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				// continue through the code.
			}
			path, err := c.dependencies.Volumes().Get(attached.ID)
			if err == nil && path != "" {
				// break out of the inner-most loop
				break
			}
		}
	}
	swarmlog.G(ctx).Debug("volumes ready")
	return nil
}

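// activateServiceBinding asks the backend to activate the container's
// service binding.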
func (c *containerAdapter) activateServiceBinding() error {
	return c.backend.ActivateContainerServiceBinding(c.container.name())
}

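// deactivateServiceBinding asks the backend to deactivate the container's
// service binding.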
func (c *containerAdapter) deactivateServiceBinding() error {
	return c.backend.DeactivateContainerServiceBinding(c.container.name())
}

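// logs subscribes to the container's logs, translating the swarm log
// subscription options into engine API log options.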
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
	apiOptions := &containertypes.LogsOptions{
		Follow: options.Follow,

		// Always say yes to Timestamps and Details. we make the decision
		// of whether to return these to the user or not way higher up the
		// stack.
		Timestamps: true,
		Details:    true,
	}

	if options.Since != nil {
		since, err := gogotypes.TimestampFromProto(options.Since)
		if err != nil {
			return nil, err
		}
		// print since as this formatted string because the docker container
		// logs interface expects it like this.
		// see github.com/docker/docker/api/types/time.ParseTimestamps
		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, errors.New("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}
	msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
	if err != nil {
		return nil, err
	}
	return msgs, nil
}

// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}