github.com/zhuohuang-hust/src-cbuild@v0.0.0-20230105071821-c7aab3e7c840/daemon/daemon.go

// Package daemon exposes the functions that occur on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	containerd "github.com/docker/containerd/api/grpc/types"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/plugin"
	"github.com/docker/libnetwork/cluster"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/registrar"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	pluginstore "github.com/docker/docker/plugin/store"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
)

var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	// DefaultInitBinary is the name of the default init binary
	DefaultInitBinary = "docker-init"

	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                        string
	repository                string
	containers                container.Store
	execCommands              *exec.Store
	referenceStore            reference.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *Config
	statsCollector            *statsCollector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	discoveryWatcher          discoveryReloader
	root                      string
	seccompEnabled            bool
	shutdown                  bool
	uidMaps                   []idtools.IDMap
	gidMaps                   []idtools.IDMap
	layerStore                layer.Store
	imageStore                image.Store
	PluginStore               *pluginstore.Store
	nameIndex                 *registrar.Registrar
	linkIndex                 *linkIndex
	containerd                libcontainerd.Client
	containerdRemote          libcontainerd.Remote
	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
	clusterProvider           cluster.Provider
	cluster                   Cluster

	seccompProfile     []byte
	seccompProfilePath string
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	if daemon.configStore != nil && daemon.configStore.Experimental {
		return true
	}
	return false
}

func (daemon *Daemon) restore() error {
	var (
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it was not created with the graph driver currently in use
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
		if err := daemon.verifyVolumesInfo(c); err != nil {
			// don't skip the container due to error
			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}

	var migrateLegacyLinks bool // Not relevant on Windows
	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			if err := backportMountSpec(c); err != nil {
				logrus.Error("Failed to migrate old mounts to use new spec format")
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
				if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
					return
				}
				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}

			}
			// fixme: only if not running
			// get list of containers we need to restart
			if !c.IsRunning() && !c.IsPaused() {
				// Do not autostart containers which have endpoints
				// in a swarm-scope network, since the cluster is
				// not yet initialized. We will start them after
				// the cluster is initialized.
				if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
					mapLock.Lock()
					restartContainers[c] = make(chan struct{})
					mapLock.Unlock()
				} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
					mapLock.Lock()
					removeContainers[c.ID] = c
					mapLock.Unlock()
				}
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.ResetRemovalInProgress()
				c.SetDead()
				c.ToDisk()
			}

			// if c.HostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				migrateLegacyLinks = true
			}
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// Perform migration of legacy sqlite links (no-op on Windows)
	if migrateLegacyLinks {
		if err := daemon.sqliteMigration(containers); err != nil {
			return err
		}
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children
			// to be running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
			removeGroup.Done()
		}(id)
	}
	removeGroup.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has a restart policy, do not
		// prepare the mountpoints since that was done on restart.
		// This is to speed up the daemon start when a restarting container
		// has a volume and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}

// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a
			// swarm endpoint now that the cluster is
			// initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}

	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure that, if the container has a network that requires discovery, the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
		if err == registrar.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}
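
// Illustrative note (not part of the original source): registerLink reserves
// the alias under the parent's name, so linking a child to a parent named
// "/web" under the alias "db" reserves the name "/web/db" for the child:
//
//	// hypothetical call
//	// err := daemon.registerLink(webContainer, dbContainer, "db")
//	// => the name index now maps "/web/db" to dbContainer.ID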

// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.isSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{configStore: config}
	// Ensure the daemon is properly shut down if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	installDefaultAppArmorProfile()
	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) {
			return nil, err
		}
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}

	d.RegistryService = registryService
	d.PluginStore = pluginstore.NewStore(config.Root)
	// Plugin system initialization should happen before restore. Do not change order.
	if err := d.pluginInit(config, containerdRemote); err != nil {
		return nil, err
	}

	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver:               driverName,
		GraphDriverOptions:        config.GraphOptions,
		UIDMaps:                   uidMaps,
		GIDMaps:                   gidMaps,
		PluginGetter:              d.PluginStore,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700); err != nil {
		return nil, err
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	eventsService := events.New()

	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
	}

	migrationStart := time.Now()
	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
	}
	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if the devices cgroup is mounted; on Linux it is a hard
	// requirement for container security.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()
	d.containerdRemote = containerdRemote

	go d.execCommandGC()

	d.containerd, err = containerdRemote.Client(d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}

	// FIXME: this method never returns an error
	info, _ := d.SystemInfo()

	engineVersion.WithValues(
		dockerversion.Version,
		dockerversion.GitCommit,
		info.Architecture,
		info.Driver,
		info.KernelVersion,
		info.OperatingSystem,
	).Set(1)
	engineCpus.Set(float64(info.NCPU))
	engineMemory.Set(float64(info.MemTotal))

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	return d, nil
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()
	// TODO(windows): Handle docker restart with paused containers
	if c.IsPaused() {
		// To terminate a process in the freezer cgroup, we should send
		// SIGTERM to the process and then unfreeze it; the process will
		// then be forced to terminate immediately.
		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
		sig, ok := signal.SignalMap["TERM"]
		if !ok {
			return fmt.Errorf("System does not support SIGTERM")
		}
		if err := daemon.kill(c, int(sig)); err != nil {
			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
		}
		if err := daemon.containerUnpause(c); err != nil {
			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
		}
		if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
			logrus.Debugf("container %s failed to exit in %d seconds of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
			sig, ok := signal.SignalMap["KILL"]
			if !ok {
				return fmt.Errorf("System does not support SIGKILL")
			}
			if err := daemon.kill(c, int(sig)); err != nil {
				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
			}
			c.WaitStop(-1 * time.Second)
			return err
		}
	}
	// If the container failed to exit within stopTimeout seconds of SIGTERM, this falls back to force-killing it
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	c.WaitStop(-1 * time.Second)
	return nil
}

// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers;
// it is never less than the daemon's configured ShutdownTimeout (-1 means no timeout).
func (daemon *Daemon) ShutdownTimeout() int {
	// By default we use the daemon's ShutdownTimeout.
	shutdownTimeout := daemon.configStore.ShutdownTimeout

	graceTimeout := 5
	if daemon.containers != nil {
		for _, c := range daemon.containers.List() {
			if shutdownTimeout >= 0 {
				stopTimeout := c.StopTimeout()
				if stopTimeout < 0 {
					shutdownTimeout = -1
				} else {
					if stopTimeout+graceTimeout > shutdownTimeout {
						shutdownTimeout = stopTimeout + graceTimeout
					}
				}
			}
		}
	}
	return shutdownTimeout
}
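
// Worked example (illustrative, not part of the original source): with a
// configured daemon ShutdownTimeout of 15 and two containers whose
// StopTimeouts are 10 and 20, the loop above extends the result to
// 20+5 = 25 seconds; if any container reports a negative StopTimeout,
// the result becomes -1 (wait indefinitely).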

// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers, if none we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.layerStore != nil {
		if err := daemon.layerStore.Cleanup(); err != nil {
			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
		}
	}

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if err := daemon.cleanupMounts(); err != nil {
		return err
	}

	return nil
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	fmt.Println("daemon/daemon.go  Mount()")
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	fmt.Println("daemon/daemon.go  container mounted via layerStore: ", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time.  This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}

	return nil
}

// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4Infos, _ := managedNetwork.Info().IpamInfo()
		for _, v4Info := range v4Infos {
			if v4Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v4Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		_, v6Infos := managedNetwork.Info().IpamInfo()
		for _, v6Info := range v6Infos {
			if v6Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v6Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
	operationCancelled := false

	for prog := range progressChan {
		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
			// don't log broken pipe errors as this is the normal case when a client aborts
			if isBrokenPipe(err) {
				logrus.Info("Pull session cancelled")
			} else {
				logrus.Errorf("error writing progress to client: %v", err)
			}
			cancelFunc()
			operationCancelled = true
			// Don't return, because we need to continue draining
			// progressChan until it's closed to avoid a deadlock.
		}
	}
}
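
// Minimal sketch (not part of the original source) of the drain-until-closed
// pattern used above: even after cancelling, the consumer keeps receiving so
// the producer's sends never block on an abandoned channel.
//
//	for prog := range progressChan {
//		if operationCancelled {
//			continue // keep draining; returning here could deadlock the sender
//		}
//		// ... write progress, possibly cancel ...
//	}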

func isBrokenPipe(e error) bool {
	if netErr, ok := e.(*net.OpError); ok {
		e = netErr.Err
		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
			e = sysErr.Err
		}
	}
	return e == syscall.EPIPE
}
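
// Illustrative note (not part of the original source): on Linux a write to a
// connection closed by the peer typically surfaces as
//
//	&net.OpError{Err: &os.SyscallError{Syscall: "write", Err: syscall.EPIPE}}
//
// which the two unwrapping steps above reduce to syscall.EPIPE.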

// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
	return daemon.layerStore.DriverName()
}

// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
	return daemon.uidMaps, daemon.gidMaps
}

// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not,
// this function will return the "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	return uid, gid
}
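
// Illustrative note (not part of the original source): with userns-remap
// configured so that container root maps to host uid/gid 100000 (for
// example, a subordinate ID range starting at 100000), GetRemappedUIDGID
// returns (100000, 100000); without user namespaces it returns (0, 0).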

// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
	}
	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
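
// Illustrative note (not part of the original source): with the common root
// of /var/lib/docker and DOCKER_TMPDIR unset, tempDir creates and returns
// /var/lib/docker/tmp; the environment variable overrides it:
//
//	// os.Setenv("DOCKER_TMPDIR", "/mnt/scratch")
//	// dir, _ := tempDir("/var/lib/docker", 0, 0) // dir == "/mnt/scratch"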

func (daemon *Daemon) setupInitLayer(initPath string) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	return setupInitLayer(initPath, rootUID, rootGID)
}

func setDefaultMtu(config *Config) {
	// do nothing if the config does not have the default 0 value.
	if config.Mtu != 0 {
		return
	}
	config.Mtu = defaultNetworkMtu
}

func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, fmt.Errorf("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
	if err != nil {
		if err == errDiscoveryDisabled {
			return nil
		}
		return err
	}

	config.ClusterAdvertise = advertise
	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels
// - Daemon debug log level
// - Daemon insecure registries
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Cluster discovery (reconfigure and restart)
// - Daemon live restore
// - Daemon shutdown timeout (in seconds)
func (daemon *Daemon) Reload(config *Config) (err error) {

	daemon.configStore.reloadLock.Lock()

	attributes := daemon.platformReload(config)

	defer func() {
		// we're unlocking here, because
		// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
		// holds that lock too.
		daemon.configStore.reloadLock.Unlock()
		if err == nil {
			daemon.LogDaemonEventWithAttributes("reload", attributes)
		}
	}()

	if err := daemon.reloadClusterDiscovery(config); err != nil {
		return err
	}

	if config.IsValueSet("labels") {
		daemon.configStore.Labels = config.Labels
	}
	if config.IsValueSet("debug") {
		daemon.configStore.Debug = config.Debug
	}
	if config.IsValueSet("insecure-registries") {
		daemon.configStore.InsecureRegistries = config.InsecureRegistries
		if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil {
			return err
		}
	}
	if config.IsValueSet("live-restore") {
		daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
			return err
		}
	}

	// If no value is set for max-concurrent-downloads, we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
	} else {
		maxConcurrentDownloads := defaultMaxConcurrentDownloads
		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
	}
	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
	if daemon.downloadManager != nil {
		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
	}

	// If no value is set for max-concurrent-uploads, we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
	} else {
		maxConcurrentUploads := defaultMaxConcurrentUploads
		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
	}
	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
	if daemon.uploadManager != nil {
		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
	}

	if config.IsValueSet("shutdown-timeout") {
		daemon.configStore.ShutdownTimeout = config.ShutdownTimeout
		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
	}

	// We emit a daemon reload event here with the updatable configuration
	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
	attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)

	if daemon.configStore.InsecureRegistries != nil {
		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
		if err != nil {
			return err
		}
		attributes["insecure-registries"] = string(insecureRegistries)
	} else {
		attributes["insecure-registries"] = "[]"
	}

	attributes["cluster-store"] = daemon.configStore.ClusterStore
	if daemon.configStore.ClusterOpts != nil {
		opts, err := json.Marshal(daemon.configStore.ClusterOpts)
		if err != nil {
			return err
		}
		attributes["cluster-store-opts"] = string(opts)
	} else {
		attributes["cluster-store-opts"] = "{}"
	}
	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise

	if daemon.configStore.Labels != nil {
		labels, err := json.Marshal(daemon.configStore.Labels)
		if err != nil {
			return err
		}
		attributes["labels"] = string(labels)
	} else {
		attributes["labels"] = "[]"
	}

	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
	attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)

	return nil
}
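
// Illustrative sketch (not part of the original source): a reload is
// typically triggered by sending SIGHUP to dockerd after editing the daemon
// configuration file; conceptually the handler boils down to:
//
//	// newConfig, err := loadDaemonConfig("/etc/docker/daemon.json") // hypothetical loader
//	// if err == nil {
//	//	err = daemon.Reload(newConfig) // applies labels, debug, registries, limits, ...
//	// }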

func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
	var err error
	newAdvertise := daemon.configStore.ClusterAdvertise
	newClusterStore := daemon.configStore.ClusterStore
	if config.IsValueSet("cluster-advertise") {
		if config.IsValueSet("cluster-store") {
			newClusterStore = config.ClusterStore
		}
		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
		if err != nil && err != errDiscoveryDisabled {
			return err
		}
	}

	if daemon.clusterProvider != nil {
		if err := config.isSwarmCompatible(); err != nil {
			return err
		}
	}

	// check discovery modifications
	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
		return nil
	}

	// enable discovery for the first time if it was not previously enabled
	if daemon.discoveryWatcher == nil {
		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
		if err != nil {
			return fmt.Errorf("discovery initialization failed (%v)", err)
		}
		daemon.discoveryWatcher = discoveryWatcher
	} else {
		if err == errDiscoveryDisabled {
			// disable discovery if it was previously enabled and it's disabled now
			daemon.discoveryWatcher.Stop()
		} else {
			// reload discovery
			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
				return err
			}
		}
	}

	daemon.configStore.ClusterStore = newClusterStore
	daemon.configStore.ClusterOpts = config.ClusterOpts
	daemon.configStore.ClusterAdvertise = newAdvertise

	if daemon.netController == nil {
		return nil
	}
	netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
	if err != nil {
		logrus.WithError(err).Warnf("failed to get options with network controller")
		return nil
	}
	err = daemon.netController.ReloadConfiguration(netOptions...)
	if err != nil {
		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
	}

	return nil
}

func isBridgeNetworkDisabled(config *Config) bool {
	return config.bridgeConfig.Iface == disableNetworkBridge
}

func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	return options, nil
}
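
// Illustrative note (not part of the original source): a cluster store of
// "consul://10.0.0.5:8500" splits on "://" into the provider "consul" and
// the URL "10.0.0.5:8500", which become OptionKVProvider and
// OptionKVProviderURL respectively; a value without "://" is rejected with
// the KV-PROVIDER://KV-URL error above.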

func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	out := make([]types.BlkioStatEntry, len(entries))
	for i, re := range entries {
		out[i] = types.BlkioStatEntry{
			Major: re.Major,
			Minor: re.Minor,
			Op:    re.Op,
			Value: re.Value,
		}
	}
	return out
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}

func (daemon *Daemon) pluginInit(cfg *Config, remote libcontainerd.Remote) error {
	return plugin.Init(cfg.Root, daemon.PluginStore, remote, daemon.RegistryService, cfg.LiveRestoreEnabled, daemon.LogPluginEvent)
}

func (daemon *Daemon) pluginShutdown() {
	manager := plugin.GetManager()
	// Check for a valid manager object. In error conditions, daemon init can
	// fail, and shutdown can be called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return err
	}

	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return err
	}

	return nil
}