github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/daemon/cluster/executor/container/adapter.go

package container // import "github.com/Prakhar-Agarwal-byte/moby/daemon/cluster/executor/container"

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"syscall"
	"time"

	"github.com/containerd/log"
	"github.com/distribution/reference"
	"github.com/Prakhar-Agarwal-byte/moby/api/types"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/backend"
	containertypes "github.com/Prakhar-Agarwal-byte/moby/api/types/container"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/events"
	imagetypes "github.com/Prakhar-Agarwal-byte/moby/api/types/image"
	"github.com/Prakhar-Agarwal-byte/moby/api/types/registry"
	containerpkg "github.com/Prakhar-Agarwal-byte/moby/container"
	"github.com/Prakhar-Agarwal-byte/moby/daemon"
	"github.com/Prakhar-Agarwal-byte/moby/daemon/cluster/convert"
	executorpkg "github.com/Prakhar-Agarwal-byte/moby/daemon/cluster/executor"
	"github.com/Prakhar-Agarwal-byte/moby/libnetwork"
	volumeopts "github.com/Prakhar-Agarwal-byte/moby/volume/service/opts"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/moby/swarmkit/v2/agent/exec"
	"github.com/moby/swarmkit/v2/api"
	swarmlog "github.com/moby/swarmkit/v2/log"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)

// nodeAttachmentReadyInterval is the interval at which to poll for the
// node's network attachments to become ready.
const nodeAttachmentReadyInterval = 100 * time.Millisecond

// containerAdapter conducts remote operations for a container. Calls are
// mostly naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
	backend       executorpkg.Backend
	imageBackend  executorpkg.ImageBackend
	volumeBackend executorpkg.VolumeBackend
	container     *containerConfig
	dependencies  exec.DependencyGetter
}

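// newContainerAdapter builds the containerConfig for the given task and node,
// and returns a containerAdapter wired up with the provided backends and
// dependency getter.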
func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task, node)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container:     ctnr,
		backend:       b,
		imageBackend:  i,
		volumeBackend: v,
		dependencies:  dependencies,
	}, nil
}

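// pullImage pulls the image referenced by the task spec. The pull is skipped
// when the image is referenced by ID, or by digest and already present
// locally. Progress messages from the pull stream are decoded and logged at a
// rate-limited interval.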
func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// Skip pulling if the image is referenced by image ID.
	if _, err := digest.Parse(spec.Image); err == nil {
		return nil
	}

	// Skip pulling if the image is referenced by digest and already
	// exists locally.
	named, err := reference.ParseNormalizedNamed(spec.Image)
	if err == nil {
		if _, ok := named.(reference.Canonical); ok {
			_, err := c.imageBackend.GetImage(ctx, spec.Image, imagetypes.GetImageOpts{})
			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				return err
			}
			if err == nil {
				return nil
			}
		}
	}

	// If the image needs to be pulled, retrieve and decode the registry
	// auth config from the task's pull options.
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &registry.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			swarmlog.G(ctx).Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		// TODO LCOW Support: This will need revisiting as
		// the stack is built up to include LCOW support for swarm.

		// Make sure the image has a tag, otherwise it will pull all tags.
		ref := reference.TagNameOnly(named)
		err := c.imageBackend.PullImage(ctx, ref, nil, metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := swarmlog.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(log.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(log.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}

// waitNodeAttachments validates that NetworkAttachments exist on this node
// for every network in use by this task. It blocks until the network
// attachments are ready, or the context times out. If it returns nil, then the
// node's network attachments are all there.
func (c *containerAdapter) waitNodeAttachments(ctx context.Context) error {
	// to do this, we're going to get the attachment store and try getting the
	// IP address for each network. if any network comes back not existing,
	// we'll wait and try again.
	attachmentStore := c.backend.GetAttachmentStore()
	if attachmentStore == nil {
		return fmt.Errorf("error getting attachment store")
	}

	// essentially, we're long-polling here. this is really sub-optimal, but a
	// better solution based off signaling channels would require a more
	// substantial rearchitecture and probably not be worth our time in terms
	// of performance gains.
	poll := time.NewTicker(nodeAttachmentReadyInterval)
	defer poll.Stop()
	for {
		// set a flag ready to true. if we try to get a network IP that doesn't
		// exist yet, we will set this flag to "false"
		ready := true
		for _, attachment := range c.container.networksAttachments {
			// we only need node attachments (IP address) for overlay networks
			// TODO(dperny): unsure if this will work with other network
			// drivers, but i also don't think other network drivers use the
			// node attachment IP address.
			if attachment.Network.DriverState.Name == "overlay" {
				if _, exists := attachmentStore.GetIPForNetwork(attachment.Network.ID); !exists {
					ready = false
				}
			}
		}

		// if everything is ready here, then we can just return no error
		if ready {
			return nil
		}

		// otherwise, try polling again, or wait for context canceled.
		select {
		case <-ctx.Done():
			return fmt.Errorf("node is missing network attachments, ip addresses may be exhausted")
		case <-poll.C:
		}
	}
}

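// createNetworks creates the managed networks for the task's network
// attachments, skipping networks that already exist or are predefined.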
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for name := range c.container.networksAttachments {
		ncr, err := c.container.networkCreateRequest(name)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}
			// Continue if CreateManagedNetwork returns a PredefinedNetworkError;
			// other callers can still treat it as an error.
			if _, ok := err.(daemon.PredefinedNetworkError); ok {
				continue
			}
			return err
		}
	}

	return nil
}

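// removeNetworks deletes the managed networks for the task's network
// attachments, ignoring networks that still have active endpoints or that no
// longer exist.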
func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	var (
		activeEndpointsError *libnetwork.ActiveEndpointsError
		errNoSuchNetwork     libnetwork.ErrNoSuchNetwork
	)

	for name, v := range c.container.networksAttachments {
		if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil {
			switch {
			case errors.As(err, &activeEndpointsError):
				continue
			case errors.As(err, &errNoSuchNetwork):
				continue
			default:
				swarmlog.G(ctx).Errorf("network %s remove failed: %v", name, err)
				return err
			}
		}
	}

	return nil
}

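// networkAttach updates the network attachment for the first network in the
// container's networking config.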
func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
}

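// waitForDetach blocks until the backend reports that the container has
// detached from the first network in its networking config, or the context is
// canceled.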
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
}

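// create creates the managed container for this task, then attaches its
// dependency store, secret and config references, and service config.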
func (c *containerAdapter) create(ctx context.Context) error {
	var cr containertypes.CreateResponse
	var err error
	if cr, err = c.backend.CreateManagedContainer(ctx, types.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(c.dependencies.Volumes()),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
	}); err != nil {
		return err
	}

	container := c.container.task.Spec.GetContainer()
	if container == nil {
		return errors.New("unable to get container from task spec")
	}

	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
		return err
	}

	// configure secrets
	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
		return err
	}

	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
		return err
	}

	return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig())
}

// checkMounts ensures that the provided mounts won't have any host-specific
// problems at startup. For example, we disallow bind mounts whose source path
// does not exist, which is slightly different from the container API.
func (c *containerAdapter) checkMounts() error {
	spec := c.container.spec()
	for _, mount := range spec.Mounts {
		switch mount.Type {
		case api.MountTypeBind:
			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
			}
		}
	}

	return nil
}

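// start validates the task's mounts via checkMounts and then starts the
// container.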
func (c *containerAdapter) start(ctx context.Context) error {
	if err := c.checkMounts(); err != nil {
		return err
	}

	return c.backend.ContainerStart(ctx, c.container.name(), nil, "", "")
}

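// inspect returns the current state of the task's container, favoring the
// context error when the context has been canceled.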
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	cs, err := c.backend.ContainerInspectCurrent(ctx, c.container.name(), false)
	if ctx.Err() != nil {
		return types.ContainerJSON{}, ctx.Err()
	}
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return *cs, nil
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	swarmlog.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					swarmlog.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}

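// wait returns a channel that receives the container's state once it is no
// longer running.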
func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
}

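// shutdown stops the container, using the task's stop grace period as the
// timeout when one is set.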
func (c *containerAdapter) shutdown(ctx context.Context) error {
	options := containertypes.StopOptions{}
	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
	if spec := c.container.spec(); spec.StopGracePeriod != nil {
		timeout := int(spec.StopGracePeriod.Seconds)
		options.Timeout = &timeout
	}
	return c.backend.ContainerStop(ctx, c.container.name(), options)
}

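// terminate kills the container with SIGKILL.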
func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), syscall.SIGKILL.String())
}

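// remove force-removes the container along with its volumes.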
func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

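// createVolumes creates the plugin volumes that are embedded inside the
// task's volume mounts.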
func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		mount := mount
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.volumeBackend.Create(ctx, req.Name, req.Driver,
			volumeopts.WithCreateOptions(req.DriverOpts),
			volumeopts.WithCreateLabels(req.Labels),
		); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}
	}

	return nil
}

// waitClusterVolumes blocks until the VolumeGetter returns a path for each
// cluster volume in use by this task.
func (c *containerAdapter) waitClusterVolumes(ctx context.Context) error {
	for _, attached := range c.container.task.Volumes {
		// for every attachment, try until we succeed or until the context
		// is canceled.
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				// continue through the code.
			}
			path, err := c.dependencies.Volumes().Get(attached.ID)
			if err == nil && path != "" {
				// break out of the inner-most loop
				break
			}
		}
	}
	swarmlog.G(ctx).Debug("volumes ready")
	return nil
}

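// activateServiceBinding asks the backend to activate the service binding for
// this task's container.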
func (c *containerAdapter) activateServiceBinding() error {
	return c.backend.ActivateContainerServiceBinding(c.container.name())
}

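// deactivateServiceBinding asks the backend to deactivate the service binding
// for this task's container.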
func (c *containerAdapter) deactivateServiceBinding() error {
	return c.backend.DeactivateContainerServiceBinding(c.container.name())
}

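// logs subscribes to the container's logs, translating the swarm log
// subscription options into engine log options.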
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
	apiOptions := &containertypes.LogsOptions{
		Follow: options.Follow,

		// Always say yes to Timestamps and Details. The decision of whether
		// to return these to the user or not is made much higher up the
		// stack.
		Timestamps: true,
		Details:    true,
	}

	if options.Since != nil {
		since, err := gogotypes.TimestampFromProto(options.Since)
		if err != nil {
			return nil, err
		}
		// Print since as this formatted string because the docker container
		// logs interface expects it like this; see
		// github.com/Prakhar-Agarwal-byte/moby/api/types/time.ParseTimestamps.
		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, errors.New("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}
	msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
	if err != nil {
		return nil, err
	}
	return msgs, nil
}

// TODO: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}