github.com/kaisenlinux/docker@v0.0.0-20230510090727-ea55db55fac7/swarmkit/agent/exec/dockerapi/adapter.go

package dockerapi

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	engineapi "github.com/docker/docker/client"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/time/rate"
)

// containerAdapter conducts remote operations for a container. Its methods
// are mostly thin wrappers around the client API, seeded with information
// from containerConfig.
type containerAdapter struct {
	client    engineapi.APIClient
	container *containerConfig
	secrets   exec.SecretGetter
}

func newContainerAdapter(client engineapi.APIClient, nodeDescription *api.NodeDescription, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(nodeDescription, task)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		client:    client,
		container: ctnr,
		secrets:   secrets,
	}, nil
}

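// noopPrivilegeFn satisfies the PrivilegeFunc hook on types.ImagePullOptions.
// The client invokes it when the registry rejects a pull as unauthorized;
// returning empty credentials here means the pull is retried once without
// any additional privilege.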
func noopPrivilegeFn() (string, error) { return "", nil }

func (c *containerConfig) imagePullOptions() types.ImagePullOptions {
	var registryAuth string

	if c.spec().PullOptions != nil {
		registryAuth = c.spec().PullOptions.RegistryAuth
	}

	return types.ImagePullOptions{
		// if the image needs to be pulled, the auth config will be retrieved and updated
		RegistryAuth:  registryAuth,
		PrivilegeFunc: noopPrivilegeFn,
	}
}

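// For reference, RegistryAuth carries the engine API's X-Registry-Auth
// payload: a base64url-encoded JSON auth config along these lines
// (illustrative values only):
//
//	{"username": "user", "password": "secret", "serveraddress": "registry.example.com"}
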
func (c *containerAdapter) pullImage(ctx context.Context) error {
	rc, err := c.client.ImagePull(ctx, c.container.image(), c.container.imagePullOptions())
	if err != nil {
		return err
	}
	// the pull stream must be closed to release the underlying connection
	defer rc.Close()

	dec := json.NewDecoder(rc)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(1000*time.Millisecond), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}
	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return errors.Errorf("%v", errMsg)
	}
	return nil
}

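// A typical message on the pull progress stream looks roughly like the
// following (the exact field set varies by engine version; values are
// illustrative only):
//
//	{"status": "Downloading", "id": "8ddc19f16526", "progressDetail": {"current": 1536, "total": 32768}}
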
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for _, network := range c.container.networks() {
		opts, err := c.container.networkCreateOptions(network)
		if err != nil {
			return err
		}

		if _, err := c.client.NetworkCreate(ctx, network, opts); err != nil {
			if isNetworkExistError(err, network) {
				continue
			}

			return err
		}
	}

	return nil
}

func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	for _, nid := range c.container.networks() {
		if err := c.client.NetworkRemove(ctx, nid); err != nil {
			if isActiveEndpointError(err) {
				continue
			}

			log.G(ctx).Errorf("network %s remove failed", nid)
			return err
		}
	}

	return nil
}

func (c *containerAdapter) create(ctx context.Context) error {
	_, err := c.client.ContainerCreate(ctx,
		c.container.config(),
		c.container.hostConfig(),
		c.container.networkingConfig(),
		c.container.name())

	return err
}

func (c *containerAdapter) start(ctx context.Context) error {
	// TODO(nishanttotla): Consider adding checkpoint handling later
	return c.client.ContainerStart(ctx, c.container.name(), types.ContainerStartOptions{})
}

func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	return c.client.ContainerInspect(ctx, c.container.name())
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
//
// A chan struct{} is returned that will be closed if the event processing
// fails and needs to be restarted.
func (c *containerAdapter) events(ctx context.Context) (<-chan events.Message, <-chan struct{}, error) {
	// TODO(stevvooe): Move this to a single, global event dispatch. For
	// now, we create a connection per container.
	var (
		eventsq = make(chan events.Message)
		closed  = make(chan struct{})
	)

	log.G(ctx).Debugf("waiting on events")
	// TODO(stevvooe): For long running tasks, it is likely that we will have
	// to restart this under failure.
	eventCh, errCh := c.client.Events(ctx, types.EventsOptions{
		Since:   "0",
		Filters: c.container.eventFilter(),
	})

	go func() {
		defer close(closed)

		for {
			select {
			case msg := <-eventCh:
				select {
				case eventsq <- msg:
				case <-ctx.Done():
					return
				}
			case err := <-errCh:
				log.G(ctx).WithError(err).Error("error from events stream")
				return
			case <-ctx.Done():
				// exit
				return
			}
		}
	}()

	return eventsq, closed, nil
}

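// A sketch of the intended consumption pattern (hypothetical caller code):
//
//	eventsq, closed, _ := adapter.events(ctx)
//	for {
//		select {
//		case msg := <-eventsq:
//			_ = msg // handle the event
//		case <-closed:
//			// event processing failed; callers should restart the stream
//			return
//		case <-ctx.Done():
//			return
//		}
//	}
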
func (c *containerAdapter) shutdown(ctx context.Context) error {
	// Default stop grace period to 10s.
	stopgrace := 10 * time.Second
	spec := c.container.spec()
	if spec.StopGracePeriod != nil {
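		// Note: a failed duration conversion is silently discarded here,
		// which leaves stopgrace at zero rather than the 10s default.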
		stopgrace, _ = gogotypes.DurationFromProto(spec.StopGracePeriod)
	}
	return c.client.ContainerStop(ctx, c.container.name(), &stopgrace)
}

func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.client.ContainerKill(ctx, c.container.name(), "")
}

func (c *containerAdapter) remove(ctx context.Context) error {
	return c.client.ContainerRemove(ctx, c.container.name(), types.ContainerRemoveOptions{
		RemoveVolumes: true,
		Force:         true,
	})
}

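// Note that RemoveVolumes only removes anonymous volumes associated with the
// container; named volumes, such as those created by createVolumes below,
// are left in place.
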
func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.spec().Mounts {
		if mount.Type != api.MountTypeVolume {
			continue
		}

		// we only create volumes when volume options with a driver config
		// are present
		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)
		if _, err := c.client.VolumeCreate(ctx, *req); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when a named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}
	}

	return nil
}

func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) {
	conf := c.container.config()
	if conf != nil && conf.Tty {
		return nil, errors.New("logs not supported on services with TTY")
	}

	apiOptions := types.ContainerLogsOptions{
		Follow:     options.Follow,
		Timestamps: true,
		Details:    false,
	}

	if options.Since != nil {
		since, err := gogotypes.TimestampFromProto(options.Since)
		if err != nil {
			return nil, err
		}
		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
	}

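	// The protobuf encoding (see the comment below) uses negative values: a
	// Tail of -(n + 1) requests the last n lines, which maps directly onto
	// the engine API's numeric tail parameter.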
	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, fmt.Errorf("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}

	return c.client.ContainerLogs(ctx, c.container.name(), apiOptions)
}

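// Because TTY services are rejected above, the stream returned by logs is in
// the engine's multiplexed log format; callers are expected to split stdout
// from stderr, for example with github.com/docker/docker/pkg/stdcopy.StdCopy.
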
// TODO(mrjana/stevvooe): There is no proper error code for network not found
// error in engine-api. Resort to string matching until engine-api is fixed.

func isActiveEndpointError(err error) bool {
	return strings.Contains(err.Error(), "has active endpoints")
}

func isNetworkExistError(err error, name string) bool {
	return strings.Contains(err.Error(), fmt.Sprintf("network with name %s already exists", name))
}

func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}
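
// These helpers are plain substring checks against engine error text; for
// illustration (hypothetical error value):
//
//	isUnknownContainer(errors.New("Error response from daemon: No such container: web.1")) // true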