github.com/adityamillind98/moby@v23.0.0-rc.4+incompatible/daemon/daemon.go

     1  // Package daemon exposes the functions that occur on the host server
     2  // on which the Docker daemon is running.
     3  //
     4  // In implementing the various functions of the daemon, there is often
     5  // a method-specific struct for configuring the runtime behavior.
     6  package daemon // import "github.com/docker/docker/daemon"
     7  
     8  import (
     9  	"context"
    10  	"fmt"
    11  	"net"
    12  	"net/url"
    13  	"os"
    14  	"path"
    15  	"path/filepath"
    16  	"runtime"
    17  	"sync"
    18  	"time"
    19  
    20  	"github.com/containerd/containerd"
    21  	"github.com/containerd/containerd/defaults"
    22  	"github.com/containerd/containerd/pkg/dialer"
    23  	"github.com/containerd/containerd/pkg/userns"
    24  	"github.com/containerd/containerd/remotes/docker"
    25  	"github.com/docker/docker/api/types"
    26  	containertypes "github.com/docker/docker/api/types/container"
    27  	"github.com/docker/docker/api/types/swarm"
    28  	"github.com/docker/docker/builder"
    29  	"github.com/docker/docker/container"
    30  	"github.com/docker/docker/daemon/config"
    31  	"github.com/docker/docker/daemon/events"
    32  	"github.com/docker/docker/daemon/exec"
    33  	_ "github.com/docker/docker/daemon/graphdriver/register" // register graph drivers
    34  	"github.com/docker/docker/daemon/images"
    35  	"github.com/docker/docker/daemon/logger"
    36  	"github.com/docker/docker/daemon/network"
    37  	"github.com/docker/docker/daemon/stats"
    38  	dmetadata "github.com/docker/docker/distribution/metadata"
    39  	"github.com/docker/docker/dockerversion"
    40  	"github.com/docker/docker/errdefs"
    41  	"github.com/docker/docker/image"
    42  	"github.com/docker/docker/layer"
    43  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    44  	"github.com/docker/docker/libnetwork"
    45  	"github.com/docker/docker/libnetwork/cluster"
    46  	nwconfig "github.com/docker/docker/libnetwork/config"
    47  	"github.com/docker/docker/pkg/fileutils"
    48  	"github.com/docker/docker/pkg/idtools"
    49  	"github.com/docker/docker/pkg/plugingetter"
    50  	"github.com/docker/docker/pkg/sysinfo"
    51  	"github.com/docker/docker/pkg/system"
    52  	"github.com/docker/docker/pkg/truncindex"
    53  	"github.com/docker/docker/plugin"
    54  	pluginexec "github.com/docker/docker/plugin/executor/containerd"
    55  	refstore "github.com/docker/docker/reference"
    56  	"github.com/docker/docker/registry"
    57  	"github.com/docker/docker/runconfig"
    58  	volumesservice "github.com/docker/docker/volume/service"
    59  	"github.com/moby/buildkit/util/resolver"
    60  	resolverconfig "github.com/moby/buildkit/util/resolver/config"
    61  	"github.com/moby/locker"
    62  	"github.com/pkg/errors"
    63  	"github.com/sirupsen/logrus"
    64  	"go.etcd.io/bbolt"
    65  	"golang.org/x/sync/semaphore"
    66  	"golang.org/x/sync/singleflight"
    67  	"google.golang.org/grpc"
    68  	"google.golang.org/grpc/backoff"
    69  	"google.golang.org/grpc/credentials/insecure"
    70  )
    71  
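        // errSystemNotSupported is returned when the Docker daemon cannot run on
        // the host platform.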
    72  var (
    73  	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
    74  )
    75  
    76  // Daemon holds information about the Docker daemon.
    77  type Daemon struct {
    78  	id                    string
    79  	repository            string
    80  	containers            container.Store
    81  	containersReplica     container.ViewDB
    82  	execCommands          *exec.Store
    83  	imageService          *images.ImageService
    84  	idIndex               *truncindex.TruncIndex
    85  	configStore           *config.Config
    86  	statsCollector        *stats.Collector
    87  	defaultLogConfig      containertypes.LogConfig
    88  	registryService       registry.Service
    89  	EventsService         *events.Events
    90  	netController         libnetwork.NetworkController
    91  	volumes               *volumesservice.VolumesService
    92  	root                  string
    93  	sysInfoOnce           sync.Once
    94  	sysInfo               *sysinfo.SysInfo
    95  	shutdown              bool
    96  	idMapping             idtools.IdentityMapping
    97  	graphDriver           string        // TODO: move graphDriver field to an InfoService
    98  	PluginStore           *plugin.Store // TODO: remove
    99  	pluginManager         *plugin.Manager
   100  	linkIndex             *linkIndex
   101  	containerdCli         *containerd.Client
   102  	containerd            libcontainerdtypes.Client
   103  	defaultIsolation      containertypes.Isolation // Default isolation mode on Windows
   104  	clusterProvider       cluster.Provider
   105  	cluster               Cluster
   106  	genericResources      []swarm.GenericResource
   107  	metricsPluginListener net.Listener
   108  	ReferenceStore        refstore.Store
   109  
   110  	machineMemory uint64
   111  
   112  	seccompProfile     []byte
   113  	seccompProfilePath string
   114  
   115  	usage singleflight.Group
   116  
   117  	pruneRunning int32
   118  	hosts        map[string]bool // hosts stores the addresses the daemon is listening on
   119  	startupDone  chan struct{}
   120  
   121  	attachmentStore       network.AttachmentStore
   122  	attachableNetworkLock *locker.Locker
   123  
   124  	// mdDB is used on Windows, which doesn't currently support running on
   125  	// containerd. It stores metadata for the content store (used for manifest
   126  	// caching) and needs to be closed on daemon exit.
   127  	mdDB *bbolt.DB
   128  }
   129  
   130  // StoreHosts stores the addresses the daemon is listening on
   131  func (daemon *Daemon) StoreHosts(hosts []string) {
   132  	if daemon.hosts == nil {
   133  		daemon.hosts = make(map[string]bool)
   134  	}
   135  	for _, h := range hosts {
   136  		daemon.hosts[h] = true
   137  	}
   138  }
   139  
   140  // HasExperimental returns whether the experimental features of the daemon are enabled or not
   141  func (daemon *Daemon) HasExperimental() bool {
   142  	return daemon.configStore != nil && daemon.configStore.Experimental
   143  }
   144  
   145  // Features returns the features map from configStore
   146  func (daemon *Daemon) Features() *map[string]bool {
   147  	return &daemon.configStore.Features
   148  }
   149  
   150  // RegistryHosts returns registry configuration in containerd resolvers format
   151  func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
   152  	var (
   153  		registryKey = "docker.io"
   154  		mirrors     = make([]string, len(daemon.configStore.Mirrors))
   155  		m           = map[string]resolverconfig.RegistryConfig{}
   156  	)
   157  	// must trim "https://" or "http://" prefix
   158  	for i, v := range daemon.configStore.Mirrors {
   159  		if uri, err := url.Parse(v); err == nil {
   160  			v = uri.Host
   161  		}
   162  		mirrors[i] = v
   163  	}
   164  	// set mirrors for default registry
   165  	m[registryKey] = resolverconfig.RegistryConfig{Mirrors: mirrors}
   166  
   167  	for _, v := range daemon.configStore.InsecureRegistries {
   168  		u, err := url.Parse(v)
   169  		c := resolverconfig.RegistryConfig{}
   170  		if err == nil {
   171  			v = u.Host
   172  			t := true
   173  			if u.Scheme == "http" {
   174  				c.PlainHTTP = &t
   175  			} else {
   176  				c.Insecure = &t
   177  			}
   178  		}
   179  		m[v] = c
   180  	}
   181  
   182  	for k, v := range m {
   183  		v.TLSConfigDir = []string{registry.HostCertsDir(k)}
   184  		m[k] = v
   185  	}
   186  
   187  	certsDir := registry.CertsDir()
   188  	if fis, err := os.ReadDir(certsDir); err == nil {
   189  		for _, fi := range fis {
   190  			if _, ok := m[fi.Name()]; !ok {
   191  				m[fi.Name()] = resolverconfig.RegistryConfig{
   192  					TLSConfigDir: []string{filepath.Join(certsDir, fi.Name())},
   193  				}
   194  			}
   195  		}
   196  	}
   197  
   198  	return resolver.NewRegistryConfig(m)
   199  }
   200  
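        // restore loads the containers stored on disk and restores their runtime
        // state. The work is done in several parallel phases, each bounded by a
        // weighted semaphore: loading container metadata, registering names and
        // containers, reconciling state with containerd, initializing the network
        // controller, registering links, restarting or removing containers as
        // configured, and preparing mount points.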
   201  func (daemon *Daemon) restore() error {
   202  	var mapLock sync.Mutex
   203  	containers := make(map[string]*container.Container)
   204  
   205  	logrus.Info("Loading containers: start.")
   206  
   207  	dir, err := os.ReadDir(daemon.repository)
   208  	if err != nil {
   209  		return err
   210  	}
   211  
   212  	// parallelLimit is the maximum number of parallel startup jobs that we
   213  	// allow (this is the limit used for all startup semaphores). The multiplier
   214  	// (128) was chosen after some fairly significant benchmarking -- don't change
   215  	// it unless you've tested it significantly (this value is adjusted if
   216  	// RLIMIT_NOFILE is small to avoid EMFILE).
   217  	parallelLimit := adjustParallelLimit(len(dir), 128*runtime.NumCPU())
   218  
   219  	// Re-used for all parallel startup jobs.
   220  	var group sync.WaitGroup
   221  	sem := semaphore.NewWeighted(int64(parallelLimit))
   222  
   223  	for _, v := range dir {
   224  		group.Add(1)
   225  		go func(id string) {
   226  			defer group.Done()
   227  			_ = sem.Acquire(context.Background(), 1)
   228  			defer sem.Release(1)
   229  
   230  			log := logrus.WithField("container", id)
   231  
   232  			c, err := daemon.load(id)
   233  			if err != nil {
   234  				log.WithError(err).Error("failed to load container")
   235  				return
   236  			}
   237  			if !system.IsOSSupported(c.OS) {
   238  				log.Errorf("failed to load container: %s (%q)", system.ErrNotSupportedOperatingSystem, c.OS)
   239  				return
   240  			}
   241  			// Ignore the container if it was not created with the storage driver currently in use by the daemon.
   242  			if (c.Driver == "" && daemon.graphDriver == "aufs") || c.Driver == daemon.graphDriver {
   243  				rwlayer, err := daemon.imageService.GetLayerByID(c.ID)
   244  				if err != nil {
   245  					log.WithError(err).Error("failed to load container mount")
   246  					return
   247  				}
   248  				c.RWLayer = rwlayer
   249  				log.WithFields(logrus.Fields{
   250  					"running": c.IsRunning(),
   251  					"paused":  c.IsPaused(),
   252  				}).Debug("loaded container")
   253  
   254  				mapLock.Lock()
   255  				containers[c.ID] = c
   256  				mapLock.Unlock()
   257  			} else {
   258  				log.Debugf("cannot load container because it was created with another storage driver")
   259  			}
   260  		}(v.Name())
   261  	}
   262  	group.Wait()
   263  
   264  	removeContainers := make(map[string]*container.Container)
   265  	restartContainers := make(map[*container.Container]chan struct{})
   266  	activeSandboxes := make(map[string]interface{})
   267  
   268  	for _, c := range containers {
   269  		group.Add(1)
   270  		go func(c *container.Container) {
   271  			defer group.Done()
   272  			_ = sem.Acquire(context.Background(), 1)
   273  			defer sem.Release(1)
   274  
   275  			log := logrus.WithField("container", c.ID)
   276  
   277  			if err := daemon.registerName(c); err != nil {
   278  				log.WithError(err).Errorf("failed to register container name: %s", c.Name)
   279  				mapLock.Lock()
   280  				delete(containers, c.ID)
   281  				mapLock.Unlock()
   282  				return
   283  			}
   284  			if err := daemon.Register(c); err != nil {
   285  				log.WithError(err).Error("failed to register container")
   286  				mapLock.Lock()
   287  				delete(containers, c.ID)
   288  				mapLock.Unlock()
   289  				return
   290  			}
   291  		}(c)
   292  	}
   293  	group.Wait()
   294  
   295  	for _, c := range containers {
   296  		group.Add(1)
   297  		go func(c *container.Container) {
   298  			defer group.Done()
   299  			_ = sem.Acquire(context.Background(), 1)
   300  			defer sem.Release(1)
   301  
   302  			log := logrus.WithField("container", c.ID)
   303  
   304  			if err := daemon.checkpointAndSave(c); err != nil {
   305  				log.WithError(err).Error("error saving backported mountspec to disk")
   306  			}
   307  
   308  			daemon.setStateCounter(c)
   309  
   310  			logger := func(c *container.Container) *logrus.Entry {
   311  				return log.WithFields(logrus.Fields{
   312  					"running":    c.IsRunning(),
   313  					"paused":     c.IsPaused(),
   314  					"restarting": c.IsRestarting(),
   315  				})
   316  			}
   317  
   318  			logger(c).Debug("restoring container")
   319  
   320  			var (
   321  				err      error
   322  				alive    bool
   323  				ec       uint32
   324  				exitedAt time.Time
   325  				process  libcontainerdtypes.Process
   326  			)
   327  
   328  			alive, _, process, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
   329  			if err != nil && !errdefs.IsNotFound(err) {
   330  				logger(c).WithError(err).Error("failed to restore container with containerd")
   331  				return
   332  			}
   333  			logger(c).Debugf("alive: %v", alive)
   334  			if !alive {
   335  				// If process is not nil, cleanup dead container from containerd.
   336  				// If process is nil then the above `containerd.Restore` returned an errdefs.NotFoundError,
   337  				// and docker's view of the container state will be updated accordingly via SetStopped further down.
   338  				if process != nil {
   339  					logger(c).Debug("cleaning up dead container process")
   340  					ec, exitedAt, err = process.Delete(context.Background())
   341  					if err != nil && !errdefs.IsNotFound(err) {
   342  						logger(c).WithError(err).Error("failed to delete container from containerd")
   343  						return
   344  					}
   345  				}
   346  			} else if !daemon.configStore.LiveRestoreEnabled {
   347  				logger(c).Debug("shutting down container considered alive by containerd")
   348  				if err := daemon.shutdownContainer(c); err != nil && !errdefs.IsNotFound(err) {
   349  					log.WithError(err).Error("error shutting down container")
   350  					return
   351  				}
   352  				c.ResetRestartManager(false)
   353  			}
   354  
   355  			if c.IsRunning() || c.IsPaused() {
   356  				logger(c).Debug("syncing container on disk state with real state")
   357  
   358  				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
   359  
   360  				switch {
   361  				case c.IsPaused() && alive:
   362  					s, err := daemon.containerd.Status(context.Background(), c.ID)
   363  					if err != nil {
   364  						logger(c).WithError(err).Error("failed to get container status")
   365  					} else {
   366  						logger(c).WithField("state", s).Info("restored container paused")
   367  						switch s {
   368  						case containerd.Paused, containerd.Pausing:
   369  							// nothing to do
   370  						case containerd.Stopped:
   371  							alive = false
   372  						case containerd.Unknown:
   373  							log.Error("unknown status for paused container during restore")
   374  						default:
   375  							// running
   376  							c.Lock()
   377  							c.Paused = false
   378  							daemon.setStateCounter(c)
   379  							daemon.updateHealthMonitor(c)
   380  							if err := c.CheckpointTo(daemon.containersReplica); err != nil {
   381  								log.WithError(err).Error("failed to update paused container state")
   382  							}
   383  							c.Unlock()
   384  						}
   385  					}
   386  				case !c.IsPaused() && alive:
   387  					logger(c).Debug("restoring healthcheck")
   388  					c.Lock()
   389  					daemon.updateHealthMonitor(c)
   390  					c.Unlock()
   391  				}
   392  
   393  				if !alive {
   394  					logger(c).Debug("setting stopped state")
   395  					c.Lock()
   396  					c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
   397  					daemon.Cleanup(c)
   398  					if err := c.CheckpointTo(daemon.containersReplica); err != nil {
   399  						log.WithError(err).Error("failed to update stopped container state")
   400  					}
   401  					c.Unlock()
   402  					logger(c).Debug("set stopped state")
   403  				}
   404  
   405  				// We call Mount and then Unmount to get the BaseFS of the container.
   406  				if err := daemon.Mount(c); err != nil {
   407  				// The mount is unlikely to fail. However, in case the mount fails,
   408  				// the container should still be allowed to restore here. Some
   409  				// functionality (like docker exec -u user) might be missing, but the
   410  				// container can still be stopped/restarted/removed.
   411  				// See #29365 for related information.
   412  				// The error is only logged here.
   413  					logger(c).WithError(err).Warn("failed to mount container to get BaseFs path")
   414  				} else {
   415  					if err := daemon.Unmount(c); err != nil {
   416  						logger(c).WithError(err).Warn("failed to umount container to get BaseFs path")
   417  					}
   418  				}
   419  
   420  				c.ResetRestartManager(false)
   421  				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
   422  					options, err := daemon.buildSandboxOptions(c)
   423  					if err != nil {
   424  						logger(c).WithError(err).Warn("failed to build sandbox option to restore container")
   425  					}
   426  					mapLock.Lock()
   427  					activeSandboxes[c.NetworkSettings.SandboxID] = options
   428  					mapLock.Unlock()
   429  				}
   430  			}
   431  
   432  			// Get the list of containers we need to restart.
   433  
   434  			// Do not autostart containers that have endpoints in a
   435  			// swarm-scope network yet, since the cluster is not
   436  			// initialized yet. We will start them after the cluster
   437  			// is initialized.
   440  			if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
   441  				mapLock.Lock()
   442  				restartContainers[c] = make(chan struct{})
   443  				mapLock.Unlock()
   444  			} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
   445  				mapLock.Lock()
   446  				removeContainers[c.ID] = c
   447  				mapLock.Unlock()
   448  			}
   449  
   450  			c.Lock()
   451  			if c.RemovalInProgress {
   452  				// We probably crashed in the middle of a removal, reset
   453  				// the flag.
   454  				//
   455  				// We DO NOT remove the container here as we do not
   456  				// know whether the user also requested removal of the
   457  				// associated volumes, network links, or both. So we
   458  				// put the container in the "dead" state and leave
   459  				// further processing up to them.
   460  				c.RemovalInProgress = false
   461  				c.Dead = true
   462  				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
   463  					log.WithError(err).Error("failed to update RemovalInProgress container state")
   464  				} else {
   465  					log.Debugf("reset RemovalInProgress state for container")
   466  				}
   467  			}
   468  			c.Unlock()
   469  			logger(c).Debug("done restoring container")
   470  		}(c)
   471  	}
   472  	group.Wait()
   473  
   474  	// Initialize the network controller and configure network settings.
   475  	//
   476  	// Note that we cannot initialize the network controller earlier, as it
   477  	// needs to know if there's active sandboxes (running containers).
   478  	if err = daemon.initNetworkController(activeSandboxes); err != nil {
   479  		return fmt.Errorf("Error initializing network controller: %v", err)
   480  	}
   481  
   482  	// Now that all the containers are registered, register the links
   483  	for _, c := range containers {
   484  		group.Add(1)
   485  		go func(c *container.Container) {
   486  			_ = sem.Acquire(context.Background(), 1)
   487  
   488  			if err := daemon.registerLinks(c, c.HostConfig); err != nil {
   489  				logrus.WithField("container", c.ID).WithError(err).Error("failed to register link for container")
   490  			}
   491  
   492  			sem.Release(1)
   493  			group.Done()
   494  		}(c)
   495  	}
   496  	group.Wait()
   497  
   498  	for c, notifier := range restartContainers {
   499  		group.Add(1)
   500  		go func(c *container.Container, chNotify chan struct{}) {
   501  			_ = sem.Acquire(context.Background(), 1)
   502  
   503  			log := logrus.WithField("container", c.ID)
   504  
   505  			log.Debug("starting container")
   506  
   507  			// Ignore errors here, as this is a best-effort wait for children to be
   508  			// running before we try to start the container.
   509  			children := daemon.children(c)
   510  			timeout := time.NewTimer(5 * time.Second)
   511  			defer timeout.Stop()
   512  
   513  			for _, child := range children {
   514  				if notifier, exists := restartContainers[child]; exists {
   515  					select {
   516  					case <-notifier:
   517  					case <-timeout.C:
   518  					}
   519  				}
   520  			}
   521  
   522  			if err := daemon.prepareMountPoints(c); err != nil {
   523  				log.WithError(err).Error("failed to prepare mount points for container")
   524  			}
   525  			if err := daemon.containerStart(c, "", "", true); err != nil {
   526  				log.WithError(err).Error("failed to start container")
   527  			}
   528  			close(chNotify)
   529  
   530  			sem.Release(1)
   531  			group.Done()
   532  		}(c, notifier)
   533  	}
   534  	group.Wait()
   535  
   536  	for id := range removeContainers {
   537  		group.Add(1)
   538  		go func(cid string) {
   539  			_ = sem.Acquire(context.Background(), 1)
   540  
   541  			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
   542  				logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
   543  			}
   544  
   545  			sem.Release(1)
   546  			group.Done()
   547  		}(id)
   548  	}
   549  	group.Wait()
   550  
   551  	// Any containers that were started above have already had their mount points
   552  	// prepared; now prepare the mount points for the rest of the containers as well.
   553  	// Running this again on containers that already had it done is harmless.
   554  	// This must run after any containers with a restart policy, so that containerized
   555  	// plugins have a chance to be running before we try to initialize them.
   556  	for _, c := range containers {
   557  		// If the container has a restart policy, do not prepare its mount points
   558  		// here, since that was already done when it was restarted. This speeds up
   559  		// daemon start when a restarting container has a volume whose volume
   560  		// driver is not available.
   561  		if _, ok := restartContainers[c]; ok {
   562  			continue
   563  		} else if _, ok := removeContainers[c.ID]; ok {
   564  			// container is automatically removed, skip it.
   565  			continue
   566  		}
   567  
   568  		group.Add(1)
   569  		go func(c *container.Container) {
   570  			_ = sem.Acquire(context.Background(), 1)
   571  
   572  			if err := daemon.prepareMountPoints(c); err != nil {
   573  				logrus.WithField("container", c.ID).WithError(err).Error("failed to prepare mountpoints for container")
   574  			}
   575  
   576  			sem.Release(1)
   577  			group.Done()
   578  		}(c)
   579  	}
   580  	group.Wait()
   581  
   582  	logrus.Info("Loading containers: done.")
   583  
   584  	return nil
   585  }
   586  
   587  // RestartSwarmContainers restarts any autostart container which has a
   588  // swarm endpoint.
   589  func (daemon *Daemon) RestartSwarmContainers() {
   590  	ctx := context.Background()
   591  
   592  	// parallelLimit is the maximum number of parallel startup jobs that we
   593  	// allow (this is the limited used for all startup semaphores). The multipler
   594  	// (128) was chosen after some fairly significant benchmarking -- don't change
   595  	// it unless you've tested it significantly (this value is adjusted if
   596  	// RLIMIT_NOFILE is small to avoid EMFILE).
   597  	parallelLimit := adjustParallelLimit(len(daemon.List()), 128*runtime.NumCPU())
   598  
   599  	var group sync.WaitGroup
   600  	sem := semaphore.NewWeighted(int64(parallelLimit))
   601  
   602  	for _, c := range daemon.List() {
   603  		if !c.IsRunning() && !c.IsPaused() {
   604  			// Autostart all the containers that have a
   605  			// swarm endpoint now that the cluster is
   606  			// initialized.
   607  			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
   608  				group.Add(1)
   609  				go func(c *container.Container) {
   610  					if err := sem.Acquire(ctx, 1); err != nil {
   611  						// ctx is done.
   612  						group.Done()
   613  						return
   614  					}
   615  
   616  					if err := daemon.containerStart(c, "", "", true); err != nil {
   617  						logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
   618  					}
   619  
   620  					sem.Release(1)
   621  					group.Done()
   622  				}(c)
   623  			}
   624  		}
   625  	}
   626  	group.Wait()
   627  }
   628  
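        // children returns the containers that are linked as children of the given
        // container, as recorded in the daemon's link index.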
   629  func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
   630  	return daemon.linkIndex.children(c)
   631  }
   632  
   633  // parents returns the containers that are linked as parents of the given
   634  // container, as recorded in the daemon's link index.
   635  func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
   636  	return daemon.linkIndex.parents(c)
   637  }
   638  
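        // registerLink reserves the link name (the parent's name joined with the
        // alias) for the child container and records the link in the daemon's link
        // index. A name that is already reserved is logged and ignored.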
   639  func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
   640  	fullName := path.Join(parent.Name, alias)
   641  	if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
   642  		if err == container.ErrNameReserved {
   643  			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
   644  			return nil
   645  		}
   646  		return err
   647  	}
   648  	daemon.linkIndex.link(parent, child, fullName)
   649  	return nil
   650  }
   651  
   652  // DaemonJoinsCluster informs the daemon that it has joined the cluster and
   653  // provides the handler to query the cluster component.
   654  func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
   655  	daemon.setClusterProvider(clusterProvider)
   656  }
   657  
   658  // DaemonLeavesCluster informs the daemon that it has left the cluster.
   659  func (daemon *Daemon) DaemonLeavesCluster() {
   660  	// Daemon is in charge of removing the attachable networks with
   661  	// connected containers when the node leaves the swarm
   662  	daemon.clearAttachableNetworks()
   663  	// We no longer need the cluster provider, stop it now so that
   664  	// the network agent will stop listening to cluster events.
   665  	daemon.setClusterProvider(nil)
   666  	// Wait for the networking cluster agent to stop
   667  	daemon.netController.AgentStopWait()
   668  	// Daemon is in charge of removing the ingress network when the
   669  	// node leaves the swarm. Wait for job to be done or timeout.
   670  	// This is called also on graceful daemon shutdown. We need to
   671  	// wait, because the ingress release has to happen before the
   672  	// network controller is stopped.
   673  
   674  	if done, err := daemon.ReleaseIngress(); err == nil {
   675  		timeout := time.NewTimer(5 * time.Second)
   676  		defer timeout.Stop()
   677  
   678  		select {
   679  		case <-done:
   680  		case <-timeout.C:
   681  			logrus.Warn("timeout while waiting for ingress network removal")
   682  		}
   683  	} else {
   684  		logrus.Warnf("failed to initiate ingress network removal: %v", err)
   685  	}
   686  
   687  	daemon.attachmentStore.ClearAttachments()
   688  }
   689  
   690  // setClusterProvider sets a component for querying the current cluster state.
   691  func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
   692  	daemon.clusterProvider = clusterProvider
   693  	daemon.netController.SetClusterProvider(clusterProvider)
   694  	daemon.attachableNetworkLock = locker.New()
   695  }
   696  
   697  // IsSwarmCompatible verifies if the current daemon
   698  // configuration is compatible with the swarm mode
   699  func (daemon *Daemon) IsSwarmCompatible() error {
   700  	if daemon.configStore == nil {
   701  		return nil
   702  	}
   703  	return daemon.configStore.IsSwarmCompatible()
   704  }
   705  
   706  // NewDaemon sets up everything for the daemon to be able to service
   707  // requests from the webserver.
   708  func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.Store) (daemon *Daemon, err error) {
   709  	registryService, err := registry.NewService(config.ServiceOptions)
   710  	if err != nil {
   711  		return nil, err
   712  	}
   713  
   714  	// Ensure that we have a correct root key limit for launching containers.
   715  	if err := modifyRootKeyLimit(); err != nil {
   716  		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
   717  	}
   718  
   719  	// Ensure we have compatible and valid configuration options
   720  	if err := verifyDaemonSettings(config); err != nil {
   721  		return nil, err
   722  	}
   723  
   724  	// Do we have a disabled network?
   725  	config.DisableBridge = isBridgeNetworkDisabled(config)
   726  
   727  	// Setup the resolv.conf
   728  	setupResolvConf(config)
   729  
   730  	// Verify the platform is supported as a daemon
   731  	if !platformSupported {
   732  		return nil, errSystemNotSupported
   733  	}
   734  
   735  	// Validate platform-specific requirements
   736  	if err := checkSystem(); err != nil {
   737  		return nil, err
   738  	}
   739  
   740  	idMapping, err := setupRemappedRoot(config)
   741  	if err != nil {
   742  		return nil, err
   743  	}
   744  	rootIDs := idMapping.RootPair()
   745  	if err := setupDaemonProcess(config); err != nil {
   746  		return nil, err
   747  	}
   748  
   749  	// set up the tmpDir to use a canonical path
   750  	tmp, err := prepareTempDir(config.Root)
   751  	if err != nil {
   752  		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
   753  	}
   754  	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
   755  	if err != nil {
   756  		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
   757  	}
   758  	if isWindows {
   759  		if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
   760  			if err := system.MkdirAll(realTmp, 0700); err != nil {
   761  				return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
   762  			}
   763  		}
   764  		os.Setenv("TEMP", realTmp)
   765  		os.Setenv("TMP", realTmp)
   766  	} else {
   767  		os.Setenv("TMPDIR", realTmp)
   768  	}
   769  
   770  	d := &Daemon{
   771  		configStore: config,
   772  		PluginStore: pluginStore,
   773  		startupDone: make(chan struct{}),
   774  	}
   775  
   776  	// Ensure the daemon is properly shutdown if there is a failure during
   777  	// initialization
   778  	defer func() {
   779  		if err != nil {
   780  			if err := d.Shutdown(); err != nil {
   781  				logrus.Error(err)
   782  			}
   783  		}
   784  	}()
   785  
   786  	if err := d.setGenericResources(config); err != nil {
   787  		return nil, err
   788  	}
   789  	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
   790  	// on Windows to dump Go routine stacks
   791  	stackDumpDir := config.Root
   792  	if execRoot := config.GetExecRoot(); execRoot != "" {
   793  		stackDumpDir = execRoot
   794  	}
   795  	d.setupDumpStackTrap(stackDumpDir)
   796  
   797  	if err := d.setupSeccompProfile(); err != nil {
   798  		return nil, err
   799  	}
   800  
   801  	// Set the default isolation mode (only applicable on Windows)
   802  	if err := d.setDefaultIsolation(); err != nil {
   803  		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
   804  	}
   805  
   806  	if err := configureMaxThreads(config); err != nil {
   807  		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
   808  	}
   809  
   810  	// ensureDefaultAppArmorProfile does nothing if apparmor is disabled
   811  	if err := ensureDefaultAppArmorProfile(); err != nil {
   812  		logrus.Errorf(err.Error())
   813  	}
   814  
   815  	daemonRepo := filepath.Join(config.Root, "containers")
   816  	if err := idtools.MkdirAllAndChown(daemonRepo, 0710, idtools.Identity{
   817  		UID: idtools.CurrentIdentity().UID,
   818  		GID: rootIDs.GID,
   819  	}); err != nil {
   820  		return nil, err
   821  	}
   822  
   823  	// Create the directory where we'll store the runtime scripts (i.e. in
   824  	// order to support runtimeArgs)
   825  	daemonRuntimes := filepath.Join(config.Root, "runtimes")
   826  	if err := system.MkdirAll(daemonRuntimes, 0700); err != nil {
   827  		return nil, err
   828  	}
   829  	if err := d.loadRuntimes(); err != nil {
   830  		return nil, err
   831  	}
   832  
   833  	if isWindows {
   834  		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil {
   835  			return nil, err
   836  		}
   837  	}
   838  
   839  	if isWindows {
   840  		// On Windows we don't support the environment variable, or a user-supplied graphdriver.
   841  		d.graphDriver = "windowsfilter"
   842  	} else {
   843  		// Unix platforms, however, run a single graphdriver for all containers; it can
   844  		// be set through an environment variable or a daemon start parameter, or chosen
   845  		// by the layerstore at initialization based on driver priority order.
   846  		if drv := os.Getenv("DOCKER_DRIVER"); drv != "" {
   847  			d.graphDriver = drv
   848  			logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", drv)
   849  		} else {
   850  			d.graphDriver = config.GraphDriver // May still be empty. Layerstore init determines instead.
   851  		}
   852  	}
   853  
   854  	d.registryService = registryService
   855  	logger.RegisterPluginGetter(d.PluginStore)
   856  
   857  	metricsSockPath, err := d.listenMetricsSock()
   858  	if err != nil {
   859  		return nil, err
   860  	}
   861  	registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
   862  
   863  	backoffConfig := backoff.DefaultConfig
   864  	backoffConfig.MaxDelay = 3 * time.Second
   865  	connParams := grpc.ConnectParams{
   866  		Backoff: backoffConfig,
   867  	}
   868  	gopts := []grpc.DialOption{
   869  		// WithBlock makes sure that the following containerd request
   870  		// is reliable.
   871  		//
   872  		// NOTE: In one edge case, under high load pressure the kernel may
   873  		// OOM-kill dockerd, containerd, and the containerd-shims. When
   874  		// dockerd and containerd both restart, containerd takes some time
   875  		// to recover all the existing containers. Until containerd is
   876  		// serving again, dockerd requests fail with a gRPC error. Worse,
   877  		// the restore action ignores any non-NotFound errors and reports
   878  		// a running state for containers that have already stopped, which
   879  		// is unexpected behavior, and dockerd has to be restarted to
   880  		// recover.
   881  		//
   882  		// That is painful. Adding WithBlock prevents this edge case, and in
   883  		// the common case containerd will be serving again shortly, so there
   884  		// is no harm in using WithBlock for the containerd connection.
   885  		grpc.WithBlock(),
   886  
   887  		grpc.WithTransportCredentials(insecure.NewCredentials()),
   888  		grpc.WithConnectParams(connParams),
   889  		grpc.WithContextDialer(dialer.ContextDialer),
   890  
   891  		// TODO(stevvooe): We may need to allow configuration of this on the client.
   892  		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
   893  		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
   894  	}
   895  
   896  	if config.ContainerdAddr != "" {
   897  		d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
   898  		if err != nil {
   899  			return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
   900  		}
   901  	}
   902  
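        	// createPluginExec lazily constructs the executor used by the plugin
        	// manager. When a containerd address is configured it dials a separate
        	// containerd client in the plugins namespace, and on non-Windows
        	// platforms it passes along the daemon's default runtime.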
   903  	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
   904  		var pluginCli *containerd.Client
   905  
   906  		if config.ContainerdAddr != "" {
   907  			pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
   908  			if err != nil {
   909  				return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
   910  			}
   911  		}
   912  
   913  		var rt types.Runtime
   914  		if runtime.GOOS != "windows" {
   915  			rtPtr, err := d.getRuntime(config.GetDefaultRuntimeName())
   916  			if err != nil {
   917  				return nil, err
   918  			}
   919  			rt = *rtPtr
   920  		}
   921  		return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, config.ContainerdPluginNamespace, m, rt)
   922  	}
   923  
   924  	// Plugin system initialization should happen before restore. Do not change order.
   925  	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
   926  		Root:               filepath.Join(config.Root, "plugins"),
   927  		ExecRoot:           getPluginExecRoot(config.Root),
   928  		Store:              d.PluginStore,
   929  		CreateExecutor:     createPluginExec,
   930  		RegistryService:    registryService,
   931  		LiveRestoreEnabled: config.LiveRestoreEnabled,
   932  		LogPluginEvent:     d.LogPluginEvent, // todo: make private
   933  		AuthzMiddleware:    config.AuthzMiddleware,
   934  	})
   935  	if err != nil {
   936  		return nil, errors.Wrap(err, "couldn't create plugin manager")
   937  	}
   938  
   939  	if err := d.setupDefaultLogConfig(); err != nil {
   940  		return nil, err
   941  	}
   942  
   943  	layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{
   944  		Root:                      config.Root,
   945  		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
   946  		GraphDriver:               d.graphDriver,
   947  		GraphDriverOptions:        config.GraphOptions,
   948  		IDMapping:                 idMapping,
   949  		PluginGetter:              d.PluginStore,
   950  		ExperimentalEnabled:       config.Experimental,
   951  	})
   952  	if err != nil {
   953  		return nil, err
   954  	}
   955  
   956  	// Layerstore initialization may have selected the driver; pick up the driver name from it.
   957  	d.graphDriver = layerStore.DriverName()
   958  
   959  	// Configure and validate the kernel's security support. Note this is a Linux/FreeBSD
   960  	// operation only, so it is safe to pass *just* the runtime OS graphdriver.
   961  	if err := configureKernelSecuritySupport(config, d.graphDriver); err != nil {
   962  		return nil, err
   963  	}
   964  
   965  	imageRoot := filepath.Join(config.Root, "image", d.graphDriver)
   966  	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
   967  	if err != nil {
   968  		return nil, err
   969  	}
   970  
   971  	imageStore, err := image.NewImageStore(ifs, layerStore)
   972  	if err != nil {
   973  		return nil, err
   974  	}
   975  
   976  	d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
   977  	if err != nil {
   978  		return nil, err
   979  	}
   980  
   981  	// Try to preserve the daemon ID (which is the trust-key's ID) when upgrading
   982  	// an existing installation; this is a "best-effort".
   983  	idPath := filepath.Join(config.Root, "engine-id")
   984  	err = migrateTrustKeyID(config.TrustKeyPath, idPath)
   985  	if err != nil {
   986  		logrus.WithError(err).Warnf("unable to migrate engine ID; a new engine ID will be generated")
   987  	}
   988  
   989  	// We have a single tag/reference store for the daemon globally. However, it's
   990  	// stored under the graphdriver. On host platforms which only support a single
   991  	// container OS, but multiple selectable graphdrivers, the location of the global
   992  	// reference store therefore depends on which graphdriver was chosen. For
   993  	// platforms which support multiple container operating systems, it is less
   994  	// obvious where the global reference store should live. Fortunately,
   995  	// for Windows, which is currently the only daemon supporting multiple container
   996  	// operating systems, the list of graphdrivers available isn't user configurable.
   997  	// For backwards compatibility, we just put it under the windowsfilter
   998  	// directory regardless.
   999  	refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
  1000  	rs, err := refstore.NewReferenceStore(refStoreLocation)
  1001  	if err != nil {
  1002  		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
  1003  	}
  1004  	d.ReferenceStore = rs
  1005  
  1006  	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
  1007  	if err != nil {
  1008  		return nil, err
  1009  	}
  1010  
  1011  	// Check that the devices cgroup is mounted; on Linux it is a hard requirement
  1012  	// for container security.
  1013  	//
  1014  	// Important: we call getSysInfo() directly here, without storing the results,
  1015  	// as networking has not yet been set up, so we only have partial system info
  1016  	// at this point.
  1017  	//
  1018  	// TODO(thaJeztah) add a utility to only collect the CgroupDevicesEnabled information
  1019  	if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(d).CgroupDevicesEnabled {
  1020  		return nil, errors.New("Devices cgroup isn't mounted")
  1021  	}
  1022  
  1023  	d.id, err = loadOrCreateID(idPath)
  1024  	if err != nil {
  1025  		return nil, err
  1026  	}
  1027  	d.repository = daemonRepo
  1028  	d.containers = container.NewMemoryStore()
  1029  	if d.containersReplica, err = container.NewViewDB(); err != nil {
  1030  		return nil, err
  1031  	}
  1032  	d.execCommands = exec.NewStore()
  1033  	d.idIndex = truncindex.NewTruncIndex([]string{})
  1034  	d.statsCollector = d.newStatsCollector(1 * time.Second)
  1035  
  1036  	d.EventsService = events.New()
  1037  	d.root = config.Root
  1038  	d.idMapping = idMapping
  1039  
  1040  	d.linkIndex = newLinkIndex()
  1041  
  1042  	imgSvcConfig := images.ImageServiceConfig{
  1043  		ContainerStore:            d.containers,
  1044  		DistributionMetadataStore: distributionMetadataStore,
  1045  		EventsService:             d.EventsService,
  1046  		ImageStore:                imageStore,
  1047  		LayerStore:                layerStore,
  1048  		MaxConcurrentDownloads:    config.MaxConcurrentDownloads,
  1049  		MaxConcurrentUploads:      config.MaxConcurrentUploads,
  1050  		MaxDownloadAttempts:       config.MaxDownloadAttempts,
  1051  		ReferenceStore:            rs,
  1052  		RegistryService:           registryService,
  1053  		ContentNamespace:          config.ContainerdNamespace,
  1054  	}
  1055  
  1056  	// containerd is not currently supported on Windows, so d.containerdCli
  1057  	// may be nil. In that case we'll create a local content store;
  1058  	// otherwise we'll use containerd's.
  1059  	if d.containerdCli != nil {
  1060  		imgSvcConfig.Leases = d.containerdCli.LeasesService()
  1061  		imgSvcConfig.ContentStore = d.containerdCli.ContentStore()
  1062  	} else {
  1063  		cs, lm, err := d.configureLocalContentStore(config.ContainerdNamespace)
  1064  		if err != nil {
  1065  			return nil, err
  1066  		}
  1067  		imgSvcConfig.ContentStore = cs
  1068  		imgSvcConfig.Leases = lm
  1069  	}
  1070  
  1071  	// TODO: imageStore, distributionMetadataStore, and ReferenceStore are only
  1072  	// used above to run migration. They could be initialized in ImageService
  1073  	// if migration is called from daemon/images. layerStore might move as well.
  1074  	d.imageService = images.NewImageService(imgSvcConfig)
  1075  	logrus.Debugf("Max Concurrent Downloads: %d", imgSvcConfig.MaxConcurrentDownloads)
  1076  	logrus.Debugf("Max Concurrent Uploads: %d", imgSvcConfig.MaxConcurrentUploads)
  1077  	logrus.Debugf("Max Download Attempts: %d", imgSvcConfig.MaxDownloadAttempts)
  1078  
  1079  	go d.execCommandGC()
  1080  
  1081  	if err := d.initLibcontainerd(ctx); err != nil {
  1082  		return nil, err
  1083  	}
  1084  
  1085  	if err := d.restore(); err != nil {
  1086  		return nil, err
  1087  	}
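        	// Container restore has completed; unblock callers of waitForStartupDone.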
  1088  	close(d.startupDone)
  1089  
  1090  	info := d.SystemInfo()
  1091  	for _, w := range info.Warnings {
  1092  		logrus.Warn(w)
  1093  	}
  1094  
  1095  	engineInfo.WithValues(
  1096  		dockerversion.Version,
  1097  		dockerversion.GitCommit,
  1098  		info.Architecture,
  1099  		info.Driver,
  1100  		info.KernelVersion,
  1101  		info.OperatingSystem,
  1102  		info.OSType,
  1103  		info.OSVersion,
  1104  		info.ID,
  1105  	).Set(1)
  1106  	engineCpus.Set(float64(info.NCPU))
  1107  	engineMemory.Set(float64(info.MemTotal))
  1108  
  1109  	logrus.WithFields(logrus.Fields{
  1110  		"version":     dockerversion.Version,
  1111  		"commit":      dockerversion.GitCommit,
  1112  		"graphdriver": d.graphDriver,
  1113  	}).Info("Docker daemon")
  1114  
  1115  	return d, nil
  1116  }
  1117  
  1118  // DistributionServices returns services controlling daemon storage
  1119  func (daemon *Daemon) DistributionServices() images.DistributionServices {
  1120  	return daemon.imageService.DistributionServices()
  1121  }
  1122  
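        // waitForStartupDone blocks until the daemon has finished restoring
        // containers, i.e. until NewDaemon has closed the startupDone channel.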
  1123  func (daemon *Daemon) waitForStartupDone() {
  1124  	<-daemon.startupDone
  1125  }
  1126  
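        // shutdownContainer stops the given container and waits, without a timeout,
        // for it to no longer be running.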
  1127  func (daemon *Daemon) shutdownContainer(c *container.Container) error {
  1128  	// If the container fails to exit within stopTimeout seconds of SIGTERM, it is forcibly killed.
  1129  	if err := daemon.containerStop(context.TODO(), c, containertypes.StopOptions{}); err != nil {
  1130  		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
  1131  	}
  1132  
  1133  	// Wait without timeout for the container to exit.
  1134  	// Ignore the result.
  1135  	<-c.Wait(context.Background(), container.WaitConditionNotRunning)
  1136  	return nil
  1137  }
  1138  
  1139  // ShutdownTimeout returns the timeout (in seconds) before containers are forcibly
  1140  // killed during shutdown. The default timeout can be configured both on the daemon
  1141  // and per container, and the longest timeout will be used. A grace-period of
  1142  // 5 seconds is added to the configured timeout.
  1143  //
  1144  // A negative (-1) timeout means "indefinitely", which means that containers
  1145  // are not forcibly killed, and the daemon shuts down after all containers exit.
  1146  func (daemon *Daemon) ShutdownTimeout() int {
  1147  	shutdownTimeout := daemon.configStore.ShutdownTimeout
  1148  	if shutdownTimeout < 0 {
  1149  		return -1
  1150  	}
  1151  	if daemon.containers == nil {
  1152  		return shutdownTimeout
  1153  	}
  1154  
  1155  	graceTimeout := 5
  1156  	for _, c := range daemon.containers.List() {
  1157  		stopTimeout := c.StopTimeout()
  1158  		if stopTimeout < 0 {
  1159  			return -1
  1160  		}
  1161  		if stopTimeout+graceTimeout > shutdownTimeout {
  1162  			shutdownTimeout = stopTimeout + graceTimeout
  1163  		}
  1164  	}
  1165  	return shutdownTimeout
  1166  }
  1167  
  1168  // Shutdown stops the daemon.
  1169  func (daemon *Daemon) Shutdown() error {
  1170  	daemon.shutdown = true
  1171  	// Keep mounts and networking running on daemon shutdown if
  1172  	// we are to keep containers running and restore them.
  1173  
  1174  	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
  1175  		// Check if there are any running containers; if none, we should do some cleanup.
  1176  		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
  1177  			// metrics plugins still need some cleanup
  1178  			daemon.cleanupMetricsPlugins()
  1179  			return nil
  1180  		}
  1181  	}
  1182  
  1183  	if daemon.containers != nil {
  1184  		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
  1185  		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
  1186  		daemon.containers.ApplyAll(func(c *container.Container) {
  1187  			if !c.IsRunning() {
  1188  				return
  1189  			}
  1190  			log := logrus.WithField("container", c.ID)
  1191  			log.Debug("shutting down container")
  1192  			if err := daemon.shutdownContainer(c); err != nil {
  1193  				log.WithError(err).Error("failed to shut down container")
  1194  				return
  1195  			}
  1196  			if mountid, err := daemon.imageService.GetLayerMountID(c.ID); err == nil {
  1197  				daemon.cleanupMountsByID(mountid)
  1198  			}
  1199  			log.Debugf("shut down container")
  1200  		})
  1201  	}
  1202  
  1203  	if daemon.volumes != nil {
  1204  		if err := daemon.volumes.Shutdown(); err != nil {
  1205  			logrus.Errorf("Error shutting down volume store: %v", err)
  1206  		}
  1207  	}
  1208  
  1209  	if daemon.imageService != nil {
  1210  		if err := daemon.imageService.Cleanup(); err != nil {
  1211  			logrus.Error(err)
  1212  		}
  1213  	}
  1214  
  1215  	// If we are part of a cluster, clean up the cluster's resources.
  1216  	if daemon.clusterProvider != nil {
  1217  		logrus.Debugf("start clean shutdown of cluster resources...")
  1218  		daemon.DaemonLeavesCluster()
  1219  	}
  1220  
  1221  	daemon.cleanupMetricsPlugins()
  1222  
  1223  	// Shutdown plugins after containers and layerstore. Don't change the order.
  1224  	daemon.pluginShutdown()
  1225  
  1226  	// trigger libnetwork Stop only if it's initialized
  1227  	if daemon.netController != nil {
  1228  		daemon.netController.Stop()
  1229  	}
  1230  
  1231  	if daemon.containerdCli != nil {
  1232  		daemon.containerdCli.Close()
  1233  	}
  1234  
  1235  	if daemon.mdDB != nil {
  1236  		daemon.mdDB.Close()
  1237  	}
  1238  
  1239  	return daemon.cleanupMounts()
  1240  }
  1241  
  1242  // Mount sets container.BaseFS
  1243  // (is it not set coming in? why is it unset?)
  1244  func (daemon *Daemon) Mount(container *container.Container) error {
  1245  	if container.RWLayer == nil {
  1246  		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
  1247  	}
  1248  	dir, err := container.RWLayer.Mount(container.GetMountLabel())
  1249  	if err != nil {
  1250  		return err
  1251  	}
  1252  	logrus.WithField("container", container.ID).Debugf("container mounted via layerStore: %v", dir)
  1253  
  1254  	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
  1255  		// The mount path reported by the graph driver should always be trusted on Windows, since the
  1256  		// volume path for a given mounted layer may change over time.  This should only be an error
  1257  		// on non-Windows operating systems.
  1258  		if runtime.GOOS != "windows" {
  1259  			daemon.Unmount(container)
  1260  			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
  1261  				daemon.imageService.GraphDriverName(), container.ID, container.BaseFS, dir)
  1262  		}
  1263  	}
  1264  	container.BaseFS = dir // TODO: combine these fields
  1265  	return nil
  1266  }
  1267  
  1268  // Unmount unsets the container base filesystem
  1269  func (daemon *Daemon) Unmount(container *container.Container) error {
  1270  	if container.RWLayer == nil {
  1271  		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
  1272  	}
  1273  	if err := container.RWLayer.Unmount(); err != nil {
  1274  		logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container")
  1275  		return err
  1276  	}
  1277  
  1278  	return nil
  1279  }
  1280  
  1281  // Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
  1282  func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
  1283  	var v4Subnets []net.IPNet
  1284  	var v6Subnets []net.IPNet
  1285  
  1286  	managedNetworks := daemon.netController.Networks()
  1287  
  1288  	for _, managedNetwork := range managedNetworks {
  1289  		v4infos, v6infos := managedNetwork.Info().IpamInfo()
  1290  		for _, info := range v4infos {
  1291  			if info.IPAMData.Pool != nil {
  1292  				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
  1293  			}
  1294  		}
  1295  		for _, info := range v6infos {
  1296  			if info.IPAMData.Pool != nil {
  1297  				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
  1298  			}
  1299  		}
  1300  	}
  1301  
  1302  	return v4Subnets, v6Subnets
  1303  }
  1304  
  1305  // prepareTempDir prepares and returns the default directory to use
  1306  // for temporary files.
  1307  // If it doesn't exist, it is created. If it exists, its content is removed.
  1308  func prepareTempDir(rootDir string) (string, error) {
  1309  	var tmpDir string
  1310  	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
  1311  		tmpDir = filepath.Join(rootDir, "tmp")
  1312  		newName := tmpDir + "-old"
  1313  		if err := os.Rename(tmpDir, newName); err == nil {
  1314  			go func() {
  1315  				if err := os.RemoveAll(newName); err != nil {
  1316  					logrus.Warnf("failed to delete old tmp directory: %s", newName)
  1317  				}
  1318  			}()
  1319  		} else if !os.IsNotExist(err) {
  1320  			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
  1321  			if err := os.RemoveAll(tmpDir); err != nil {
  1322  				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
  1323  			}
  1324  		}
  1325  	}
  1326  	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, idtools.CurrentIdentity())
  1327  }
  1328  
  1329  func (daemon *Daemon) setGenericResources(conf *config.Config) error {
  1330  	genericResources, err := config.ParseGenericResources(conf.NodeGenericResources)
  1331  	if err != nil {
  1332  		return err
  1333  	}
  1334  
  1335  	daemon.genericResources = genericResources
  1336  
  1337  	return nil
  1338  }
  1339  
  1340  // IsShuttingDown tells whether the daemon is shutting down or not
  1341  func (daemon *Daemon) IsShuttingDown() bool {
  1342  	return daemon.shutdown
  1343  }
  1344  
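        // isBridgeNetworkDisabled reports whether the default bridge network has
        // been disabled in the daemon configuration.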
  1345  func isBridgeNetworkDisabled(conf *config.Config) bool {
  1346  	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
  1347  }
  1348  
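        // networkOptions builds the libnetwork controller options from the daemon
        // configuration, the given plugin getter, and any active sandboxes that
        // need to be restored.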
  1349  func (daemon *Daemon) networkOptions(pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
  1350  	options := []nwconfig.Option{}
  1351  	if daemon.configStore == nil {
  1352  		return options, nil
  1353  	}
  1354  	conf := daemon.configStore
  1355  	dd := runconfig.DefaultDaemonNetworkMode()
  1356  
  1357  	options = []nwconfig.Option{
  1358  		nwconfig.OptionDataDir(conf.Root),
  1359  		nwconfig.OptionExecRoot(conf.GetExecRoot()),
  1360  		nwconfig.OptionDefaultDriver(string(dd)),
  1361  		nwconfig.OptionDefaultNetwork(dd.NetworkName()),
  1362  		nwconfig.OptionLabels(conf.Labels),
  1363  		nwconfig.OptionNetworkControlPlaneMTU(conf.NetworkControlPlaneMTU),
  1364  		driverOptions(conf),
  1365  	}
  1366  
  1367  	if len(conf.NetworkConfig.DefaultAddressPools.Value()) > 0 {
  1368  		options = append(options, nwconfig.OptionDefaultAddressPoolConfig(conf.NetworkConfig.DefaultAddressPools.Value()))
  1369  	}
  1370  	if conf.LiveRestoreEnabled && len(activeSandboxes) != 0 {
  1371  		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
  1372  	}
  1373  	if pg != nil {
  1374  		options = append(options, nwconfig.OptionPluginGetter(pg))
  1375  	}
  1376  
  1377  	return options, nil
  1378  }
  1379  
  1380  // GetCluster returns the cluster
  1381  func (daemon *Daemon) GetCluster() Cluster {
  1382  	return daemon.cluster
  1383  }
  1384  
  1385  // SetCluster sets the cluster
  1386  func (daemon *Daemon) SetCluster(cluster Cluster) {
  1387  	daemon.cluster = cluster
  1388  }
  1389  
  1390  func (daemon *Daemon) pluginShutdown() {
  1391  	manager := daemon.pluginManager
  1392  	// Check for a valid manager object. In error conditions, daemon init can fail
  1393  	// and shutdown called, before plugin manager is initialized.
  1394  	if manager != nil {
  1395  		manager.Shutdown()
  1396  	}
  1397  }
  1398  
  1399  // PluginManager returns current pluginManager associated with the daemon
  1400  func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
  1401  	return daemon.pluginManager
  1402  }
  1403  
  1404  // PluginGetter returns current pluginStore associated with the daemon
  1405  func (daemon *Daemon) PluginGetter() *plugin.Store {
  1406  	return daemon.PluginStore
  1407  }
  1408  
  1409  // CreateDaemonRoot creates the root for the daemon
  1410  func CreateDaemonRoot(config *config.Config) error {
  1411  	// get the canonical path to the Docker root directory
  1412  	var realRoot string
  1413  	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
  1414  		realRoot = config.Root
  1415  	} else {
  1416  		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
  1417  		if err != nil {
  1418  			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
  1419  		}
  1420  	}
  1421  
  1422  	idMapping, err := setupRemappedRoot(config)
  1423  	if err != nil {
  1424  		return err
  1425  	}
  1426  	return setupDaemonRoot(config, realRoot, idMapping.RootPair())
  1427  }
  1428  
  1429  // checkpointAndSave grabs a container lock to safely call container.CheckpointTo
  1430  func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
  1431  	container.Lock()
  1432  	defer container.Unlock()
  1433  	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
  1434  		return fmt.Errorf("Error saving container state: %v", err)
  1435  	}
  1436  	return nil
  1437  }
  1438  
  1439  // Because the CLI sends -1 when it wants to unset the swappiness value,
  1440  // we need to clear it on the server side.
  1441  func fixMemorySwappiness(resources *containertypes.Resources) {
  1442  	if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
  1443  		resources.MemorySwappiness = nil
  1444  	}
  1445  }
  1446  
  1447  // GetAttachmentStore returns current attachment store associated with the daemon
  1448  func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
  1449  	return &daemon.attachmentStore
  1450  }
  1451  
  1452  // IdentityMapping returns uid/gid mapping or a SID (in the case of Windows) for the builder
  1453  func (daemon *Daemon) IdentityMapping() idtools.IdentityMapping {
  1454  	return daemon.idMapping
  1455  }
  1456  
  1457  // ImageService returns the Daemon's ImageService
  1458  func (daemon *Daemon) ImageService() *images.ImageService {
  1459  	return daemon.imageService
  1460  }
  1461  
  1462  // BuilderBackend returns the backend used by builder
  1463  func (daemon *Daemon) BuilderBackend() builder.Backend {
  1464  	return struct {
  1465  		*Daemon
  1466  		*images.ImageService
  1467  	}{daemon, daemon.imageService}
  1468  }
  1469  
  1470  // RawSysInfo returns *sysinfo.SysInfo.
  1471  func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo {
  1472  	daemon.sysInfoOnce.Do(func() {
  1473  		// We check if sysInfo is not set here, to allow some tests to
  1474  		// override the actual sysInfo.
  1475  		if daemon.sysInfo == nil {
  1476  			daemon.sysInfo = getSysInfo(daemon)
  1477  		}
  1478  	})
  1479  
  1480  	return daemon.sysInfo
  1481  }