github.com/sams1990/dockerrepo@v17.12.1-ce-rc2+incompatible/daemon/cluster/executor/container/adapter.go

package container

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	containerpkg "github.com/docker/docker/container"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/libnetwork"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

// containerAdapter conducts remote operations for a container. Most calls
// are passed almost directly through to the client API, seeded with
// information from containerConfig.
type containerAdapter struct {
	backend      executorpkg.Backend
	container    *containerConfig
	dependencies exec.DependencyGetter
}

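// newContainerAdapter builds the containerConfig for the given task and node
// and wraps it, together with the backend and dependency getter, in a
// containerAdapter.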
func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task, node)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container:    ctnr,
		backend:      b,
		dependencies: dependencies,
	}, nil
}

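// pullImage pulls the image referenced by the task spec. The pull is skipped
// when the reference is an image ID, or a digest reference that already
// exists locally. Pull progress is decoded from the JSON stream and logged,
// rate limited to roughly one entry per second unless the status changes.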
func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// Skip pulling if the image is referenced by image ID.
	if _, err := digest.Parse(spec.Image); err == nil {
		return nil
	}

	// Skip pulling if the image is referenced by digest and already
	// exists locally.
	named, err := reference.ParseNormalizedNamed(spec.Image)
	if err == nil {
		if _, ok := named.(reference.Canonical); ok {
			_, err := c.backend.LookupImage(spec.Image)
			if err == nil {
				return nil
			}
		}
	}

	// If the image needs to be pulled, retrieve and decode the registry auth config.
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &types.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		// TODO @jhowardmsft LCOW Support: This will need revisiting as
		// the stack is built up to include LCOW support for swarm.
		platform := runtime.GOOS
		err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}

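// createNetworks creates the managed networks referenced by the task,
// skipping any network that already exists.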
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for _, network := range c.container.networks() {
		ncr, err := c.container.networkCreateRequest(network)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}

			return err
		}
	}

	return nil
}

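// removeNetworks deletes the managed networks referenced by the task.
// Networks that still have active endpoints or that no longer exist are
// skipped.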
func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	for _, nid := range c.container.networks() {
		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
			switch err.(type) {
			case *libnetwork.ActiveEndpointsError:
				continue
			case libnetwork.ErrNoSuchNetwork:
				continue
			default:
				log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
				return err
			}
		}
	}

	return nil
}

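// networkAttach updates the network attachment for the attachment container,
// using a single network picked from the container's networking config.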
func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
}

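// waitForDetach blocks until the backend reports that the attachment
// container has detached from the network, or the context is cancelled.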
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
}

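// create creates the managed container, connects it to any additional
// networks, and registers its dependency store along with the secret,
// config, and service configuration references.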
func (c *containerAdapter) create(ctx context.Context) error {
	var cr containertypes.ContainerCreateCreatedBody
	var err error
	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
	}); err != nil {
		return err
	}

	// The Docker daemon currently doesn't support multiple networks in container
	// create, so connect the container to the remaining networks afterwards.
	nc := c.container.connectNetworkingConfig(c.backend)

	if nc != nil {
		for n, ep := range nc.EndpointsConfig {
			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
				return err
			}
		}
	}

	container := c.container.task.Spec.GetContainer()
	if container == nil {
		return errors.New("unable to get container from task spec")
	}

	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
		return err
	}

	// configure secrets
	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
		return err
	}

	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
		return err
	}

	if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
		return err
	}

	return nil
}

// checkMounts ensures that the provided mounts won't have any host-specific
// problems at startup. For example, we disallow bind mounts whose source path
// does not exist, which is slightly different from the container API.
func (c *containerAdapter) checkMounts() error {
	spec := c.container.spec()
	for _, mount := range spec.Mounts {
		switch mount.Type {
		case api.MountTypeBind:
			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
			}
		}
	}

	return nil
}

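// start validates the task's mounts and then starts the container.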
func (c *containerAdapter) start(ctx context.Context) error {
	if err := c.checkMounts(); err != nil {
		return err
	}

	return c.backend.ContainerStart(c.container.name(), nil, "", "")
}

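// inspect returns the current state of the container, preferring a context
// error if the context has been cancelled.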
func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
	if ctx.Err() != nil {
		return types.ContainerJSON{}, ctx.Err()
	}
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return *cs, nil
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}

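// wait returns a channel that receives the container's state once it is no
// longer running.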
func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
}

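// shutdown stops the container, using the task's stop grace period when one
// is set.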
func (c *containerAdapter) shutdown(ctx context.Context) error {
	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
	var stopgrace *int
	spec := c.container.spec()
	if spec.StopGracePeriod != nil {
		stopgraceValue := int(spec.StopGracePeriod.Seconds)
		stopgrace = &stopgraceValue
	}
	return c.backend.ContainerStop(c.container.name(), stopgrace)
}

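// terminate kills the container with SIGKILL.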
func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}

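// remove force-removes the container along with its volumes.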
func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

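// createVolumes creates the plugin volumes that are embedded inside the
// task's volume mounts.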
func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}

	}

	return nil
}

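// activateServiceBinding asks the backend to activate the container's
// service binding.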
func (c *containerAdapter) activateServiceBinding() error {
	return c.backend.ActivateContainerServiceBinding(c.container.name())
}

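// deactivateServiceBinding asks the backend to deactivate the container's
// service binding.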
func (c *containerAdapter) deactivateServiceBinding() error {
	return c.backend.DeactivateContainerServiceBinding(c.container.name())
}

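// logs returns a channel of log messages for the container, translating the
// swarmkit log subscription options into engine container logs options.
// Tailing relative to the start of the logs is not supported.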
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
	apiOptions := &types.ContainerLogsOptions{
		Follow: options.Follow,

		// Always say yes to Timestamps and Details. We make the decision
		// of whether to return these to the user or not much higher up the
		// stack.
		Timestamps: true,
		Details:    true,
	}

	if options.Since != nil {
		since, err := gogotypes.TimestampFromProto(options.Since)
		if err != nil {
			return nil, err
		}
		// Print since as this formatted string because the docker container
		// logs interface expects it like this.
		// See github.com/docker/docker/api/types/time.ParseTimestamps
		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, errors.New("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}
	msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
	if err != nil {
		return nil, err
	}
	return msgs, nil
}

// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}