github.com/rita33cool1/iot-system-gateway@v0.0.0-20200911033302-e65bde238cc5/docker-engine/daemon/cluster/executor/container/adapter.go

     1  package container // import "github.com/docker/docker/daemon/cluster/executor/container"
     2  
     3  import (
     4  	"encoding/base64"
     5  	"encoding/json"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"os"
    10  	"runtime"
    11  	"strings"
    12  	"syscall"
    13  	"time"
    14  
    15  	"github.com/docker/distribution/reference"
    16  	"github.com/docker/docker/api/types"
    17  	"github.com/docker/docker/api/types/backend"
    18  	containertypes "github.com/docker/docker/api/types/container"
    19  	"github.com/docker/docker/api/types/events"
    20  	containerpkg "github.com/docker/docker/container"
    21  	"github.com/docker/docker/daemon"
    22  	"github.com/docker/docker/daemon/cluster/convert"
    23  	executorpkg "github.com/docker/docker/daemon/cluster/executor"
    24  	"github.com/docker/libnetwork"
    25  	"github.com/docker/swarmkit/agent/exec"
    26  	"github.com/docker/swarmkit/api"
    27  	"github.com/docker/swarmkit/log"
    28  	gogotypes "github.com/gogo/protobuf/types"
    29  	"github.com/opencontainers/go-digest"
    30  	"github.com/sirupsen/logrus"
    31  	"golang.org/x/net/context"
    32  	"golang.org/x/time/rate"
    33  )
    34  
    35  // containerAdapter conducts remote operations for a container. Calls are
    36  // mostly naked calls to the client API, seeded with information from
    37  // containerConfig.
    38  type containerAdapter struct {
    39  	backend      executorpkg.Backend
    40  	container    *containerConfig
    41  	dependencies exec.DependencyGetter
    42  }
    43  
    44  func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
    45  	ctnr, err := newContainerConfig(task, node)
    46  	if err != nil {
    47  		return nil, err
    48  	}
    49  
    50  	return &containerAdapter{
    51  		container:    ctnr,
    52  		backend:      b,
    53  		dependencies: dependencies,
    54  	}, nil
    55  }
    56  
    57  func (c *containerAdapter) pullImage(ctx context.Context) error {
    58  	spec := c.container.spec()
    59  
    60  	// Skip pulling if the image is referenced by image ID.
    61  	if _, err := digest.Parse(spec.Image); err == nil {
    62  		return nil
    63  	}
    64  
    65  	// Skip pulling if the image is referenced by digest and already
    66  	// exists locally.
    67  	named, err := reference.ParseNormalizedNamed(spec.Image)
    68  	if err == nil {
    69  		if _, ok := named.(reference.Canonical); ok {
    70  			_, err := c.backend.LookupImage(spec.Image)
    71  			if err == nil {
    72  				return nil
    73  			}
    74  		}
    75  	}
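        	// Illustrative note (not part of the original source): how the checks
        	// above classify the three common forms of spec.Image. The image
        	// strings are hypothetical.
        	//
        	//	"sha256:<64-hex-digest>"        -> digest.Parse succeeds; pull is skipped entirely
        	//	"ubuntu@sha256:<64-hex-digest>" -> parses as reference.Canonical; pull is skipped
        	//	                                   only if LookupImage already finds it locally
        	//	"ubuntu:18.04"                  -> plain tag reference; always falls through to PullImage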
    76  
    77  	// If the image needs to be pulled, retrieve and decode the registry auth config.
    78  	var encodedAuthConfig string
    79  	if spec.PullOptions != nil {
    80  		encodedAuthConfig = spec.PullOptions.RegistryAuth
    81  	}
    82  
    83  	authConfig := &types.AuthConfig{}
    84  	if encodedAuthConfig != "" {
    85  		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
    86  			logrus.Warnf("invalid authconfig: %v", err)
    87  		}
    88  	}
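        	// Illustrative sketch (not in the original source): the RegistryAuth value
        	// decoded above is a base64url-encoded JSON types.AuthConfig. A client could
        	// produce it roughly like this (the credentials are made up):
        	//
        	//	buf, _ := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
        	//	encoded := base64.URLEncoding.EncodeToString(buf)
        	//	// encoded is what arrives here in spec.PullOptions.RegistryAuth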
    89  
    90  	pr, pw := io.Pipe()
    91  	metaHeaders := map[string][]string{}
    92  	go func() {
    93  		// TODO @jhowardmsft LCOW Support: This will need revisiting as
    94  		// the stack is built up to include LCOW support for swarm.
    95  		platform := runtime.GOOS
    96  		err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
    97  		pw.CloseWithError(err)
    98  	}()
    99  
   100  	dec := json.NewDecoder(pr)
   101  	dec.UseNumber()
   102  	m := map[string]interface{}{}
   103  	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)
   104  
   105  	lastStatus := ""
   106  	for {
   107  		if err := dec.Decode(&m); err != nil {
   108  			if err == io.EOF {
   109  				break
   110  			}
   111  			return err
   112  		}
   113  		l := log.G(ctx)
   114  		// limit pull progress logs unless the status changes
   115  		if spamLimiter.Allow() || lastStatus != m["status"] {
   116  			// if we have progress details, we have everything we need
   117  			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
   118  				// first, log the image and status
   119  				l = l.WithFields(logrus.Fields{
   120  					"image":  c.container.image(),
   121  					"status": m["status"],
   122  				})
   123  				// then, if we have progress, log the progress
   124  				if progress["current"] != nil && progress["total"] != nil {
   125  					l = l.WithFields(logrus.Fields{
   126  						"current": progress["current"],
   127  						"total":   progress["total"],
   128  					})
   129  				}
   130  			}
   131  			l.Debug("pull in progress")
   132  		}
   133  		// sometimes, we get no useful information at all, and add no fields
   134  		if status, ok := m["status"].(string); ok {
   135  			lastStatus = status
   136  		}
   137  	}
   138  
   139  	// if the final stream object contained an error, return it
   140  	if errMsg, ok := m["error"]; ok {
   141  		return fmt.Errorf("%v", errMsg)
   142  	}
   143  	return nil
   144  }
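        // Illustrative sketch (not part of the original source): the progress plumbing
        // from pullImage above in isolation - stream JSON objects through an in-memory
        // pipe and rate-limit how often they are logged. The producer loop is made up;
        // only the standard library and golang.org/x/time/rate are used.
        //
        //	pr, pw := io.Pipe()
        //	go func() {
        //		enc := json.NewEncoder(pw)
        //		for i := 0; i <= 100; i += 10 {
        //			enc.Encode(map[string]interface{}{
        //				"status":         "downloading",
        //				"progressDetail": map[string]int{"current": i, "total": 100},
        //			})
        //		}
        //		pw.CloseWithError(nil) // nil is equivalent to Close(); the reader sees io.EOF
        //	}()
        //
        //	limiter := rate.NewLimiter(rate.Every(time.Second), 1) // at most one log line per second
        //	dec := json.NewDecoder(pr)
        //	for {
        //		m := map[string]interface{}{}
        //		if err := dec.Decode(&m); err != nil { // io.EOF or a real decode error
        //			break
        //		}
        //		if limiter.Allow() {
        //			fmt.Println(m["status"], m["progressDetail"])
        //		}
        //	}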
   145  
   146  func (c *containerAdapter) createNetworks(ctx context.Context) error {
   147  	for name := range c.container.networksAttachments {
   148  		ncr, err := c.container.networkCreateRequest(name)
   149  		if err != nil {
   150  			return err
   151  		}
   152  
   153  		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
   154  			if _, ok := err.(libnetwork.NetworkNameError); ok {
   155  				continue
   156  			}
    157  			// Continue if CreateManagedNetwork returns a PredefinedNetworkError;
    158  			// other callers can still treat it as an error.
   159  			if _, ok := err.(daemon.PredefinedNetworkError); ok {
   160  				continue
   161  			}
   162  			return err
   163  		}
   164  	}
   165  
   166  	return nil
   167  }
   168  
   169  func (c *containerAdapter) removeNetworks(ctx context.Context) error {
   170  	for name, v := range c.container.networksAttachments {
   171  		if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil {
   172  			switch err.(type) {
   173  			case *libnetwork.ActiveEndpointsError:
   174  				continue
   175  			case libnetwork.ErrNoSuchNetwork:
   176  				continue
   177  			default:
   178  				log.G(ctx).Errorf("network %s remove failed: %v", name, err)
   179  				return err
   180  			}
   181  		}
   182  	}
   183  
   184  	return nil
   185  }
   186  
   187  func (c *containerAdapter) networkAttach(ctx context.Context) error {
   188  	config := c.container.createNetworkingConfig(c.backend)
   189  
   190  	var (
   191  		networkName string
   192  		networkID   string
   193  	)
   194  
   195  	if config != nil {
   196  		for n, epConfig := range config.EndpointsConfig {
   197  			networkName = n
   198  			networkID = epConfig.NetworkID
   199  			break
   200  		}
   201  	}
   202  
   203  	return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
   204  }
   205  
   206  func (c *containerAdapter) waitForDetach(ctx context.Context) error {
   207  	config := c.container.createNetworkingConfig(c.backend)
   208  
   209  	var (
   210  		networkName string
   211  		networkID   string
   212  	)
   213  
   214  	if config != nil {
   215  		for n, epConfig := range config.EndpointsConfig {
   216  			networkName = n
   217  			networkID = epConfig.NetworkID
   218  			break
   219  		}
   220  	}
   221  
   222  	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
   223  }
   224  
   225  func (c *containerAdapter) create(ctx context.Context) error {
   226  	var cr containertypes.ContainerCreateCreatedBody
   227  	var err error
   228  	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
   229  		Name:       c.container.name(),
   230  		Config:     c.container.config(),
   231  		HostConfig: c.container.hostConfig(),
   232  		// Use the first network in container create
   233  		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
   234  	}); err != nil {
   235  		return err
   236  	}
   237  
    238  	// The Docker daemon currently doesn't support multiple networks in container create,
    239  	// so connect the container to the remaining networks here.
   240  	nc := c.container.connectNetworkingConfig(c.backend)
   241  
   242  	if nc != nil {
   243  		for n, ep := range nc.EndpointsConfig {
   244  			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
   245  				return err
   246  			}
   247  		}
   248  	}
   249  
   250  	container := c.container.task.Spec.GetContainer()
   251  	if container == nil {
   252  		return errors.New("unable to get container from task spec")
   253  	}
   254  
   255  	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
   256  		return err
   257  	}
   258  
   259  	// configure secrets
   260  	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
   261  	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
   262  		return err
   263  	}
   264  
   265  	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
   266  	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
   267  		return err
   268  	}
   269  
   270  	return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig())
   271  }
   272  
    273  // checkMounts ensures that the provided mounts won't have any host-specific
    274  // problems at startup. For example, we disallow bind mounts whose source path
    275  // does not exist, which differs slightly from the container API.
   276  func (c *containerAdapter) checkMounts() error {
   277  	spec := c.container.spec()
   278  	for _, mount := range spec.Mounts {
   279  		switch mount.Type {
   280  		case api.MountTypeBind:
   281  			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
   282  				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
   283  			}
   284  		}
   285  	}
   286  
   287  	return nil
   288  }
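        // Illustrative example (not part of the original source): a bind mount spec of
        // the shape checkMounts validates. The paths are hypothetical.
        //
        //	m := api.Mount{
        //		Type:   api.MountTypeBind,
        //		Source: "/srv/app-config", // must already exist on this node
        //		Target: "/etc/app",
        //	}
        //	// If /srv/app-config is missing, checkMounts fails with:
        //	//   invalid bind mount source, source path not found: /srv/app-config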
   289  
   290  func (c *containerAdapter) start(ctx context.Context) error {
   291  	if err := c.checkMounts(); err != nil {
   292  		return err
   293  	}
   294  
   295  	return c.backend.ContainerStart(c.container.name(), nil, "", "")
   296  }
   297  
   298  func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
   299  	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
   300  	if ctx.Err() != nil {
   301  		return types.ContainerJSON{}, ctx.Err()
   302  	}
   303  	if err != nil {
   304  		return types.ContainerJSON{}, err
   305  	}
   306  	return *cs, nil
   307  }
   308  
    309  // events issues a call to the events API and returns a channel with all
    310  // events. The stream of events can be shut down by cancelling the context.
   311  func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
   312  	log.G(ctx).Debugf("waiting on events")
   313  	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
   314  	eventsq := make(chan events.Message, len(buffer))
   315  
   316  	for _, event := range buffer {
   317  		eventsq <- event
   318  	}
   319  
   320  	go func() {
   321  		defer c.backend.UnsubscribeFromEvents(l)
   322  
   323  		for {
   324  			select {
   325  			case ev := <-l:
   326  				jev, ok := ev.(events.Message)
   327  				if !ok {
   328  					log.G(ctx).Warnf("unexpected event message: %q", ev)
   329  					continue
   330  				}
   331  				select {
   332  				case eventsq <- jev:
   333  				case <-ctx.Done():
   334  					return
   335  				}
   336  			case <-ctx.Done():
   337  				return
   338  			}
   339  		}
   340  	}()
   341  
   342  	return eventsq
   343  }
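        // Illustrative sketch (not part of the original source): one way a caller might
        // drain the channel returned by events. The channel is never closed, so the
        // consumer selects on the same context that was used to start the stream.
        // "adapter" is a hypothetical *containerAdapter.
        //
        //	ctx, cancel := context.WithCancel(context.Background())
        //	defer cancel()
        //	eventsq := adapter.events(ctx)
        //	for {
        //		select {
        //		case ev := <-eventsq:
        //			log.G(ctx).Debugf("container event: %s %s", ev.Action, ev.ID)
        //		case <-ctx.Done():
        //			return
        //		}
        //	}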
   344  
   345  func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
   346  	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
   347  }
   348  
   349  func (c *containerAdapter) shutdown(ctx context.Context) error {
   350  	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
   351  	var stopgrace *int
   352  	spec := c.container.spec()
   353  	if spec.StopGracePeriod != nil {
   354  		stopgraceValue := int(spec.StopGracePeriod.Seconds)
   355  		stopgrace = &stopgraceValue
   356  	}
   357  	return c.backend.ContainerStop(c.container.name(), stopgrace)
   358  }
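        // Worked example (not part of the original source) of the conversion above:
        //
        //	// spec.StopGracePeriod == &gogotypes.Duration{Seconds: 30} -> ContainerStop(name, &stopgrace) with *stopgrace == 30
        //	// spec.StopGracePeriod == nil                              -> ContainerStop(name, nil); the daemon
        //	//                                                             falls back to the container's own stop timeout
        //	// Note that any sub-second Nanos component of the duration is dropped by int(...Seconds).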
   359  
   360  func (c *containerAdapter) terminate(ctx context.Context) error {
   361  	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
   362  }
   363  
   364  func (c *containerAdapter) remove(ctx context.Context) error {
   365  	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
   366  		RemoveVolume: true,
   367  		ForceRemove:  true,
   368  	})
   369  }
   370  
   371  func (c *containerAdapter) createVolumes(ctx context.Context) error {
   372  	// Create plugin volumes that are embedded inside a Mount
   373  	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
   374  		if mount.Type != api.MountTypeVolume {
   375  			continue
   376  		}
   377  
   378  		if mount.VolumeOptions == nil {
   379  			continue
   380  		}
   381  
   382  		if mount.VolumeOptions.DriverConfig == nil {
   383  			continue
   384  		}
   385  
   386  		req := c.container.volumeCreateRequest(&mount)
   387  
   388  		// Check if this volume exists on the engine
   389  		if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
    390  		// TODO(amitshukla): Today, volume create through the engine API does not return an error
    391  		// when a named volume with the same parameters already exists.
    392  		// It returns an error if the driver name is different - that is a valid error.
   393  			return err
   394  		}
   395  
   396  	}
   397  
   398  	return nil
   399  }
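        // Illustrative example (not part of the original source): a mount spec of the
        // shape that reaches VolumeCreate above. The volume name and driver are made up;
        // volume mounts without VolumeOptions or DriverConfig are skipped by the loop.
        //
        //	m := api.Mount{
        //		Type:   api.MountTypeVolume,
        //		Source: "app-data",
        //		Target: "/data",
        //		VolumeOptions: &api.Mount_VolumeOptions{
        //			DriverConfig: &api.Driver{Name: "local"},
        //		},
        //	}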
   400  
   401  func (c *containerAdapter) activateServiceBinding() error {
   402  	return c.backend.ActivateContainerServiceBinding(c.container.name())
   403  }
   404  
   405  func (c *containerAdapter) deactivateServiceBinding() error {
   406  	return c.backend.DeactivateContainerServiceBinding(c.container.name())
   407  }
   408  
   409  func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
   410  	apiOptions := &types.ContainerLogsOptions{
   411  		Follow: options.Follow,
   412  
    413  		// Always say yes to Timestamps and Details; the decision of whether
    414  		// to return these to the user or not is made much higher up the
    415  		// stack.
   416  		Timestamps: true,
   417  		Details:    true,
   418  	}
   419  
   420  	if options.Since != nil {
   421  		since, err := gogotypes.TimestampFromProto(options.Since)
   422  		if err != nil {
   423  			return nil, err
   424  		}
    425  		// Print since in this seconds.nanoseconds format because the docker
    426  		// container logs interface expects it that way.
    427  		// See github.com/docker/docker/api/types/time.ParseTimestamps.
   428  		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
   429  	}
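        	// Worked example (not part of the original source): a Since timestamp of
        	// 1500000000 seconds and 5 nanoseconds on the Unix clock is rendered as
        	// "1500000000.000000005", the form ParseTimestamps expects.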
   430  
   431  	if options.Tail < 0 {
   432  		// See protobuf documentation for details of how this works.
   433  		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
   434  	} else if options.Tail > 0 {
   435  		return nil, errors.New("tail relative to start of logs not supported via docker API")
   436  	}
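        	// Worked arithmetic (not part of the original source) for the mapping above:
        	//
        	//	options.Tail == 0   -> apiOptions.Tail left at its zero value ""
        	//	options.Tail == -1  -> apiOptions.Tail == "0"
        	//	options.Tail == -51 -> apiOptions.Tail == "50"
        	//	options.Tail > 0    -> rejected: tailing relative to the start is not supported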
   437  
   438  	if len(options.Streams) == 0 {
   439  		// empty == all
   440  		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
   441  	} else {
   442  		for _, stream := range options.Streams {
   443  			switch stream {
   444  			case api.LogStreamStdout:
   445  				apiOptions.ShowStdout = true
   446  			case api.LogStreamStderr:
   447  				apiOptions.ShowStderr = true
   448  			}
   449  		}
   450  	}
   451  	msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
   452  	if err != nil {
   453  		return nil, err
   454  	}
   455  	return msgs, nil
   456  }
   457  
   458  // todo: typed/wrapped errors
   459  func isContainerCreateNameConflict(err error) bool {
   460  	return strings.Contains(err.Error(), "Conflict. The name")
   461  }
   462  
   463  func isUnknownContainer(err error) bool {
   464  	return strings.Contains(err.Error(), "No such container:")
   465  }
   466  
   467  func isStoppedContainer(err error) bool {
   468  	return strings.Contains(err.Error(), "is already stopped")
   469  }
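        // Illustrative sketch (not part of the original source): how a caller might use
        // the string-based helpers above while typed/wrapped errors remain a TODO.
        // "adapter" is a hypothetical *containerAdapter.
        //
        //	if err := adapter.remove(ctx); err != nil && !isUnknownContainer(err) {
        //		return err // the container still exists but could not be removed
        //	}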