github.com/endophage/docker@v1.4.2-0.20161027011718-242853499895/daemon/daemon.go

// Package daemon exposes the functions that occur on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	containerd "github.com/docker/containerd/api/grpc/types"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/libnetwork/cluster"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/graphdb"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/registrar"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	pluginstore "github.com/docker/docker/plugin/store"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
)

var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                        string
	repository                string
	containers                container.Store
	execCommands              *exec.Store
	referenceStore            reference.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *Config
	statsCollector            *statsCollector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	discoveryWatcher          discoveryReloader
	root                      string
	seccompEnabled            bool
	shutdown                  bool
	uidMaps                   []idtools.IDMap
	gidMaps                   []idtools.IDMap
	layerStore                layer.Store
	imageStore                image.Store
	PluginStore               *pluginstore.Store
	nameIndex                 *registrar.Registrar
	linkIndex                 *linkIndex
	containerd                libcontainerd.Client
	containerdRemote          libcontainerd.Remote
	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
	clusterProvider           cluster.Provider
	cluster                   Cluster
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	return daemon.configStore != nil && daemon.configStore.Experimental
}

func (daemon *Daemon) restore() error {
	var (
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

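	// Four pieces of bookkeeping drive the rest of restore: whether any
	// container still uses the legacy sqlite link graph, which containers
	// to auto-remove, which to restart, and which network sandboxes are
	// still active (handed back to libnetwork when live-restore is used).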
	var migrateLegacyLinks bool
	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
		if err := daemon.verifyVolumesInfo(c); err != nil {
			// don't skip the container due to error
			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}
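	// The remaining per-container restore work is independent, so it is
	// fanned out across goroutines; mapLock guards the shared maps and
	// the migrateLegacyLinks flag that the goroutines write to.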
	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			if err := backportMountSpec(c); err != nil {
				logrus.Errorf("Failed to migrate old mounts to use new spec format: %v", err)
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
				if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
					return
				}
				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}

			}
			// fixme: only if not running
			// get list of containers we need to restart
			if !c.IsRunning() && !c.IsPaused() {
				// Do not autostart containers which have endpoints in a
				// swarm-scope network, since the cluster is not yet
				// initialized. We will start them after the cluster is
				// initialized.
				if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
					mapLock.Lock()
					restartContainers[c] = make(chan struct{})
					mapLock.Unlock()
				} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
					mapLock.Lock()
					removeContainers[c.ID] = c
					mapLock.Unlock()
				}
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.ResetRemovalInProgress()
				c.SetDead()
				c.ToDisk()
			}

			// if c.HostConfig.Links is nil (not just empty), then the container is still using the old sqlite links and needs to be migrated
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				mapLock.Lock()
				migrateLegacyLinks = true
				mapLock.Unlock()
			}
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// migrate any legacy links from sqlite
	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
	var legacyLinkDB *graphdb.Database
	if migrateLegacyLinks {
		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
		if err != nil {
			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
		}
		defer legacyLinkDB.Close()
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if migrateLegacyLinks {
			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
				return err
			}
		}
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best-effort wait for children
			// to be running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
			removeGroup.Done()
		}(id)
	}
	removeGroup.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issues for the containers that already had this done.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has a restart policy, do not prepare the
		// mountpoints since that was already done during restart.
		// This speeds up daemon start when a restarting container
		// has a volume and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}

// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a
			// swarm endpoint now that the cluster is
			// initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}

	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use networks that require discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the parent containers of the given container.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

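// registerLink reserves the scoped link name for the child container and
// records the link in the link index. The scoped name is the parent's
// name joined with the alias, e.g. parent "/web" with alias "db" yields
// "/web/db".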
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
		if err == registrar.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.isSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
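//
// A minimal sketch of the expected call sequence (the real wiring in
// cmd/dockerd is more involved; the variable names here are illustrative):
//
//	d, err := daemon.NewDaemon(cfg, registryService, containerdRemote)
//	if err != nil {
//		logrus.Fatal(err)
//	}
//	defer d.Shutdown()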
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	setupDumpStackTrap(config.Root)

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{configStore: config}
	// Ensure the daemon is properly shut down if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure Go's thread limit: %v", err)
	}

	installDefaultAppArmorProfile()
	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := idtools.MkdirAllAs(filepath.Join(config.Root, "credentialspecs"), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
			return nil, err
		}
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}

	d.PluginStore = pluginstore.NewStore(config.Root)

	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver:               driverName,
		GraphDriverOptions:        config.GraphOptions,
		UIDMaps:                   uidMaps,
		GIDMaps:                   gidMaps,
		PluginGetter:              d.PluginStore,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700); err != nil {
		return nil, err
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	eventsService := events.New()

	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
	}

	migrationStart := time.Now()
	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
	}
	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it is read-only.
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if the devices cgroup is mounted; on Linux it is a hard
	// requirement for container security.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.RegistryService = registryService
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()
	d.containerdRemote = containerdRemote

	go d.execCommandGC()

	d.containerd, err = containerdRemote.Client(d)
	if err != nil {
		return nil, err
	}

	// Plugin system initialization should happen before restore. Do not change order.
	if err := d.pluginInit(config, containerdRemote); err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}

	return d, nil
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()

	// TODO(windows): Handle docker restart with paused containers
	if c.IsPaused() {
		// To terminate a process in the freezer cgroup, we should send
		// SIGTERM to the process and then unfreeze it; the process is then
		// forced to terminate immediately.
		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
		sig, ok := signal.SignalMap["TERM"]
		if !ok {
			return fmt.Errorf("System does not support SIGTERM")
		}
		if err := daemon.kill(c, int(sig)); err != nil {
			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
		}
		if err := daemon.containerUnpause(c); err != nil {
			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
		}
		if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
			logrus.Debugf("container %s failed to exit in %d seconds of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
			sig, ok := signal.SignalMap["KILL"]
			if !ok {
				return fmt.Errorf("System does not support SIGKILL")
			}
			if err := daemon.kill(c, int(sig)); err != nil {
				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
			}
			c.WaitStop(-1 * time.Second)
			return err
		}
	}
	// If the container failed to exit within stopTimeout seconds of SIGTERM, use the force
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	c.WaitStop(-1 * time.Second)
	return nil
}

// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
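//
// For example, with a daemon ShutdownTimeout of 15 and a container whose
// StopTimeout is 20, the effective timeout is 25 (the container's stop
// timeout plus a 5 second grace period); if any container's StopTimeout
// is negative, the result is -1.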
func (daemon *Daemon) ShutdownTimeout() int {
	// By default we use daemon's ShutdownTimeout.
	shutdownTimeout := daemon.configStore.ShutdownTimeout

	graceTimeout := 5
	if daemon.containers != nil {
		for _, c := range daemon.containers.List() {
			if shutdownTimeout >= 0 {
				stopTimeout := c.StopTimeout()
				if stopTimeout < 0 {
					shutdownTimeout = -1
				} else {
					if stopTimeout+graceTimeout > shutdownTimeout {
						shutdownTimeout = stopTimeout + graceTimeout
					}
				}
			}
		}
	}
	return shutdownTimeout
}

// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers; if none, we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("start clean shutdown of all containers with a %d second timeout...", daemon.configStore.ShutdownTimeout)
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	// Shutdown plugins after containers. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if daemon.layerStore != nil {
		if err := daemon.layerStore.Cleanup(); err != nil {
			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
		}
	}

	if err := daemon.cleanupMounts(); err != nil {
		return err
	}

	return nil
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time.  This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}
	return nil
}

// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4Infos, _ := managedNetwork.Info().IpamInfo()
		for _, v4Info := range v4Infos {
			if v4Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v4Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		_, v6Infos := managedNetwork.Info().IpamInfo()
		for _, v6Info := range v6Infos {
			if v6Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v6Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

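// writeDistributionProgress streams pull/push progress updates to the
// client as JSON. If writing fails (e.g. the client went away), the
// operation is cancelled via cancelFunc, but progressChan keeps being
// drained so the producer does not block.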
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
	operationCancelled := false

	for prog := range progressChan {
		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
			// don't log broken pipe errors as this is the normal case when a client aborts
			if isBrokenPipe(err) {
				logrus.Info("Pull session cancelled")
			} else {
				logrus.Errorf("error writing progress to client: %v", err)
			}
			cancelFunc()
			operationCancelled = true
			// Don't return, because we need to continue draining
			// progressChan until it's closed to avoid a deadlock.
		}
	}
}

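// isBrokenPipe reports whether e is an EPIPE error, unwrapping the
// *net.OpError and *os.SyscallError layers that the net package adds.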
func isBrokenPipe(e error) bool {
	if netErr, ok := e.(*net.OpError); ok {
		e = netErr.Err
		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
			e = sysErr.Err
		}
	}
	return e == syscall.EPIPE
}

// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
	return daemon.layerStore.DriverName()
}

// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
	return daemon.uidMaps, daemon.gidMaps
}

// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not,
// this function returns the "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	return uid, gid
}

// tempDir returns the default directory to use for temporary files.
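// The DOCKER_TMPDIR environment variable overrides the default of
// <rootDir>/tmp, e.g. DOCKER_TMPDIR=/mnt/scratch dockerd.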
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
	}
	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}

func (daemon *Daemon) setupInitLayer(initPath string) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	return setupInitLayer(initPath, rootUID, rootGID)
}

func setDefaultMtu(config *Config) {
	// do nothing if the config does not have the default 0 value.
	if config.Mtu != 0 {
		return
	}
	config.Mtu = defaultNetworkMtu
}

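// configureVolumes registers the built-in "local" volume driver and
// returns the volume store rooted under the daemon root directory.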
func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, fmt.Errorf("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
	if err != nil {
		if err == errDiscoveryDisabled {
			return nil
		}
		return err
	}

	config.ClusterAdvertise = advertise
	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels
// - Daemon debug log level
// - Daemon insecure registries
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Cluster discovery (reconfigure and restart)
// - Daemon live restore
// - Daemon shutdown timeout (in seconds)
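//
// Note that an incoming value is only applied when it is marked as set
// in the incoming config (see Config.IsValueSet), so a zero-valued field
// is not mistaken for an explicit reset.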
func (daemon *Daemon) Reload(config *Config) (err error) {
	daemon.configStore.reloadLock.Lock()

	attributes := daemon.platformReload(config)

	defer func() {
		// we're unlocking here, because
		// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
		// holds that lock too.
		daemon.configStore.reloadLock.Unlock()
		if err == nil {
			daemon.LogDaemonEventWithAttributes("reload", attributes)
		}
	}()

	if err := daemon.reloadClusterDiscovery(config); err != nil {
		return err
	}

	if config.IsValueSet("labels") {
		daemon.configStore.Labels = config.Labels
	}
	if config.IsValueSet("debug") {
		daemon.configStore.Debug = config.Debug
	}
	if config.IsValueSet("insecure-registries") {
		daemon.configStore.InsecureRegistries = config.InsecureRegistries
		if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil {
			return err
		}
	}
	if config.IsValueSet("live-restore") {
		daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
			return err
		}
	}

	// If no value is set for max-concurrent-downloads we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
	} else {
		maxConcurrentDownloads := defaultMaxConcurrentDownloads
		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
	}
	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
	if daemon.downloadManager != nil {
		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
	}

	// If no value is set for max-concurrent-uploads we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
	} else {
		maxConcurrentUploads := defaultMaxConcurrentUploads
		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
	}
	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
	if daemon.uploadManager != nil {
		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
	}

	if config.IsValueSet("shutdown-timeout") {
		daemon.configStore.ShutdownTimeout = config.ShutdownTimeout
		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
	}

	// We emit the daemon reload event here with the updatable configurations
	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
	attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)

	if daemon.configStore.InsecureRegistries != nil {
		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
		if err != nil {
			return err
		}
		attributes["insecure-registries"] = string(insecureRegistries)
	} else {
		attributes["insecure-registries"] = "[]"
	}

	attributes["cluster-store"] = daemon.configStore.ClusterStore
	if daemon.configStore.ClusterOpts != nil {
		opts, err := json.Marshal(daemon.configStore.ClusterOpts)
		if err != nil {
			return err
		}
		attributes["cluster-store-opts"] = string(opts)
	} else {
		attributes["cluster-store-opts"] = "{}"
	}
	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise

	if daemon.configStore.Labels != nil {
		labels, err := json.Marshal(daemon.configStore.Labels)
		if err != nil {
			return err
		}
		attributes["labels"] = string(labels)
	} else {
		attributes["labels"] = "[]"
	}

	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
	attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)

	return nil
}

func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
	var err error
	newAdvertise := daemon.configStore.ClusterAdvertise
	newClusterStore := daemon.configStore.ClusterStore
	if config.IsValueSet("cluster-advertise") {
		if config.IsValueSet("cluster-store") {
			newClusterStore = config.ClusterStore
		}
		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
		if err != nil && err != errDiscoveryDisabled {
			return err
		}
	}

	if daemon.clusterProvider != nil {
		if err := config.isSwarmCompatible(); err != nil {
			return err
		}
	}

	// check discovery modifications
	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
		return nil
	}

	// enable discovery for the first time if it was not previously enabled
	if daemon.discoveryWatcher == nil {
		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
		if err != nil {
			return fmt.Errorf("discovery initialization failed (%v)", err)
		}
		daemon.discoveryWatcher = discoveryWatcher
	} else {
		if err == errDiscoveryDisabled {
			// disable discovery if it was previously enabled and it's disabled now
			daemon.discoveryWatcher.Stop()
		} else {
			// reload discovery
			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
				return err
			}
		}
	}

	daemon.configStore.ClusterStore = newClusterStore
	daemon.configStore.ClusterOpts = config.ClusterOpts
	daemon.configStore.ClusterAdvertise = newAdvertise

	if daemon.netController == nil {
		return nil
	}
	netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
	if err != nil {
		logrus.WithError(err).Warnf("failed to get options with network controller")
		return nil
	}
	err = daemon.netController.ReloadConfiguration(netOptions...)
	if err != nil {
		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
	}

	return nil
}

func isBridgeNetworkDisabled(config *Config) bool {
	return config.bridgeConfig.Iface == disableNetworkBridge
}

func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

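	// A cluster store is given in KV-PROVIDER://KV-URL form, for example
	// consul://10.0.0.1:8500, etcd://10.0.0.1:2379 or zk://10.0.0.1:2181.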
	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	return options, nil
}

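// copyBlkioEntry converts containerd blkio stat entries into the
// engine API's BlkioStatEntry representation.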
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	out := make([]types.BlkioStatEntry, len(entries))
	for i, re := range entries {
		out[i] = types.BlkioStatEntry{
			Major: re.Major,
			Minor: re.Minor,
			Op:    re.Op,
			Value: re.Value,
		}
	}
	return out
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}