github.com/rish1988/moby@v25.0.2+incompatible/daemon/start.go

package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"runtime"
	"time"

	"github.com/containerd/log"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/container"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/internal/compatcontext"
	"github.com/docker/docker/libcontainerd"
	"github.com/pkg/errors"
)

// validateState verifies if the container is in a non-conflicting state.
func validateState(ctr *container.Container) error {
	ctr.Lock()
	defer ctr.Unlock()

	// Intentionally checking paused first, because a container can be
	// BOTH running AND paused. To start a paused (but running) container,
	// it must be thawed ("un-paused").
	if ctr.Paused {
		return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
	} else if ctr.Running {
		// This is not an actual error, but produces a 304 "not modified"
		// when returned through the API to indicate the container is
		// already in the desired state. It's implemented as an error
		// to make the code calling this function terminate early (as
		// no further processing is needed).
		return errdefs.NotModified(errors.New("container is already running"))
	}
	if ctr.RemovalInProgress || ctr.Dead {
		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
	}
	return nil
}

// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
	daemonCfg := daemon.config()
	if checkpoint != "" && !daemonCfg.Experimental {
		return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
	}

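	// Look up the container; GetContainer resolves a full ID, name, or unique ID prefix.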
	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}
	if err := validateState(ctr); err != nil {
		return err
	}

	// Windows does not have the backwards compatibility issue here.
	if runtime.GOOS != "windows" {
		// This is kept for backward compatibility - hostconfig should be passed when
		// creating a container, not during start.
		if hostConfig != nil {
			log.G(ctx).Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
			oldNetworkMode := ctr.HostConfig.NetworkMode
			if err := daemon.setSecurityOptions(&daemonCfg.Config, ctr, hostConfig); err != nil {
				return errdefs.InvalidParameter(err)
			}
			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
				return errdefs.InvalidParameter(err)
			}
			if err := daemon.setHostConfig(ctr, hostConfig); err != nil {
				return errdefs.InvalidParameter(err)
			}
			newNetworkMode := ctr.HostConfig.NetworkMode
			if string(oldNetworkMode) != string(newNetworkMode) {
				// If the user changed the network mode on start, clean up the old
				// networks. This is a deprecated feature that was removed in Docker 1.12.
				ctr.NetworkSettings.Networks = nil
			}
			if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
				return errdefs.System(err)
			}
			ctr.InitDNSHostConfig()
		}
	} else {
		if hostConfig != nil {
			return errdefs.InvalidParameter(errors.New("Supplying a hostconfig on start is not supported. It should be supplied on create"))
		}
	}

	// Check that hostConfig is in line with the current system settings.
	// It may happen that cgroups are unmounted, or the like.
	if _, err = daemon.verifyContainerSettings(daemonCfg, ctr.HostConfig, nil, false); err != nil {
		return errdefs.InvalidParameter(err)
	}
	// Adapt settings for old containers, in case this function was updated
	// and old containers never had a chance to call it at the create stage.
	if hostConfig != nil {
		if err := daemon.adaptContainerSettings(&daemonCfg.Config, ctr.HostConfig, false); err != nil {
			return errdefs.InvalidParameter(err)
		}
	}
	return daemon.containerStart(ctx, daemonCfg, ctr, checkpoint, checkpointDir, true)
}

// containerStart prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
	start := time.Now()
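	// The container lock is held for the entire start sequence; the deferred
	// cleanup below briefly drops it while removing an AutoRemove container.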
	container.Lock()
	defer container.Unlock()

	if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false
		return nil
	}

	if container.RemovalInProgress || container.Dead {
		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
	}

	if checkpointDir != "" {
		// TODO(mlaventure): how would we support that?
		return errdefs.Forbidden(errors.New("custom checkpointdir is not supported"))
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if retErr != nil {
			container.SetError(retErr)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode() == 0 {
				container.SetExitCode(exitUnknown)
			}
			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
				log.G(ctx).Errorf("%s: failed saving state on start failure: %v", container.ID, err)
			}
			container.Reset(false)

			daemon.Cleanup(compatcontext.WithoutCancel(ctx), container)
			// if the container's AutoRemove flag is set, remove it after cleanup
			if container.HostConfig.AutoRemove {
				container.Unlock()
				if err := daemon.containerRm(&daemonCfg.Config, container.ID, &backend.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
					log.G(ctx).Errorf("can't remove container %s: %v", container.ID, err)
				}
				container.Lock()
			}
		}
	}()

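	// Mount the container's root filesystem. The behavior is platform-specific;
	// on some graphdrivers this is effectively a no-op.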
	if err := daemon.conditionalMountOnStart(container); err != nil {
		return err
	}

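	// Set up networking for the container (network sandbox and endpoints).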
	if err := daemon.initializeNetworking(&daemonCfg.Config, container); err != nil {
		return err
	}

	mnts, err := daemon.setupContainerDirs(container)
	if err != nil {
		return err
	}

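	// Gather the container's volume and bind mounts to include in the OCI
	// spec; the returned cleanup function is deferred so anything it set up
	// is released when this function returns.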
	m, cleanup, err := daemon.setupMounts(ctx, container)
	if err != nil {
		return err
	}
	mnts = append(mnts, m...)
	defer cleanup(compatcontext.WithoutCancel(ctx))

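	// Build the OCI runtime spec from the container's configuration and the
	// assembled mounts.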
	spec, err := daemon.createSpec(ctx, daemonCfg, container, mnts)
	if err != nil {
		// Any error that occurs while creating the spec, even if it's the
		// result of an invalid container config, must be considered a System
		// error (internal server error), as it's not an error with the request
		// to start the container.
		//
		// Invalid configuration in the config itself must be validated when
		// creating the container (creating its config), but some errors are
		// dependent on the current state, for example when starting a container
		// that shares a namespace with another container, and that container
		// is not running (or missing).
		return errdefs.System(err)
	}

	if resetRestartManager {
		container.ResetRestartManager(true)
		container.HasBeenManuallyStopped = false
	}

	if err := daemon.saveAppArmorConfig(container); err != nil {
		return err
	}

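	// Resolve the directory holding the checkpoint to restore from
	// (checkpoint/restore is typically CRIU-based on Linux).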
	if checkpoint != "" {
		checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false)
		if err != nil {
			return err
		}
	}

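	// Select the runtime shim and any runtime-specific create options for
	// this container.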
	shim, createOptions, err := daemon.getLibcontainerdCreateOptions(daemonCfg, container)
	if err != nil {
		return err
	}

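	// Create the container in containerd, replacing any stale instance left
	// over from a previous daemon run.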
	ctr, err := libcontainerd.ReplaceContainer(ctx, daemon.containerd, container.ID, spec, shim, createOptions)
	if err != nil {
		return setExitCodeFromError(container.SetExitCode, err)
	}
	defer func() {
		if retErr != nil {
			if err := ctr.Delete(compatcontext.WithoutCancel(ctx)); err != nil {
				log.G(ctx).WithError(err).WithField("container", container.ID).
					Error("failed to delete container after failed start")
			}
		}
	}()

	// TODO(mlaventure): we need to specify checkpoint options here
	tsk, err := ctr.NewTask(context.TODO(), // Passing ctx caused integration tests to be stuck in the cleanup phase
		checkpointDir, container.StreamConfig.Stdin() != nil || container.Config.Tty,
		container.InitializeStdio)
	if err != nil {
		return setExitCodeFromError(container.SetExitCode, err)
	}
	defer func() {
		if retErr != nil {
			if err := tsk.ForceDelete(compatcontext.WithoutCancel(ctx)); err != nil {
				log.G(ctx).WithError(err).WithField("container", container.ID).
					Error("failed to delete task after failed start")
			}
		}
	}()

	if err := tsk.Start(context.TODO()); err != nil { // passing ctx caused integration tests to be stuck in the cleanup phase
		return setExitCodeFromError(container.SetExitCode, err)
	}

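	// The task is running: record the new state and reset the manual-restart
	// bookkeeping so the restart policy applies cleanly on future exits.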
	container.HasBeenManuallyRestarted = false
	container.SetRunning(ctr, tsk, true)
	container.HasBeenStartedBefore = true
	daemon.setStateCounter(container)

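	// Start the health-check monitor if the container defines a healthcheck.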
	daemon.initHealthMonitor(container)

	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		log.G(ctx).WithError(err).WithField("container", container.ID).
			Error("failed to store container")
	}

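	// Emit the "start" event and record how long the start took.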
	daemon.LogContainerEvent(container, events.ActionStart)
	containerActions.WithValues("start").UpdateSince(start)

	return nil
}

// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(ctx context.Context, container *container.Container) {
	// Microsoft HCS containers get in a bad state if host resources are
	// released while the container still exists.
	if ctr, ok := container.C8dContainer(); ok {
		if err := ctr.Delete(context.Background()); err != nil {
			log.G(ctx).Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
		}
	}

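	// Release the container's network endpoints and sandbox.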
	daemon.releaseNetwork(container)

	if err := container.UnmountIpcMount(); err != nil {
		log.G(ctx).Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
	}

	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
		// FIXME: remove once reference counting for graphdrivers has been refactored
		// Ensure that all the mounts are gone
		if mountid, err := daemon.imageService.GetLayerMountID(container.ID); err == nil {
			daemon.cleanupMountsByID(mountid)
		}
	}

	if err := container.UnmountSecrets(); err != nil {
		log.G(ctx).Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
	}

	if err := recursiveUnmount(container.Root); err != nil {
		log.G(ctx).WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.")
	}

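	// Unregister any exec instances associated with this container.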
	for _, eConfig := range container.ExecCommands.Commands() {
		daemon.unregisterExecCommand(container, eConfig)
	}

	if container.BaseFS != "" {
		if err := container.UnmountVolumes(ctx, daemon.LogVolumeEvent); err != nil {
			log.G(ctx).Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
		}
	}

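	// Unblock any clients still attached to the container's stdio streams.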
	container.CancelAttachContext()
}