github.com/brahmaroutu/docker@v1.2.1-0.20160809185609-eb28dde01f16/daemon/daemon.go

     1  // Package daemon exposes the functions that occur on the host server
     2  // on which the Docker daemon is running.
     3  //
     4  // In implementing the various functions of the daemon, there is often
     5  // a method-specific struct for configuring the runtime behavior.
     6  package daemon
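
// The package comment above mentions method-specific configuration
// structs; for example, container removal is driven by a dedicated
// types.ContainerRmConfig rather than a long parameter list. A minimal
// sketch of the call shape (d and containerID stand for a *Daemon and a
// container ID; the flag values are illustrative — see the ContainerRm
// call in restore below):
//
//	cfg := &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}
//	if err := d.ContainerRm(containerID, cfg); err != nil {
//		logrus.Error(err)
//	}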
     7  
     8  import (
     9  	"encoding/json"
    10  	"fmt"
    11  	"io"
    12  	"io/ioutil"
    13  	"net"
    14  	"os"
    15  	"path"
    16  	"path/filepath"
    17  	"runtime"
    18  	"strings"
    19  	"sync"
    20  	"syscall"
    21  	"time"
    22  
    23  	"github.com/Sirupsen/logrus"
    24  	containerd "github.com/docker/containerd/api/grpc/types"
    25  	"github.com/docker/docker/api"
    26  	"github.com/docker/docker/container"
    27  	"github.com/docker/docker/daemon/events"
    28  	"github.com/docker/docker/daemon/exec"
    29  	"github.com/docker/engine-api/types"
    30  	containertypes "github.com/docker/engine-api/types/container"
    31  	"github.com/docker/libnetwork/cluster"
    32  	// register graph drivers
    33  	_ "github.com/docker/docker/daemon/graphdriver/register"
    34  	dmetadata "github.com/docker/docker/distribution/metadata"
    35  	"github.com/docker/docker/distribution/xfer"
    36  	"github.com/docker/docker/image"
    37  	"github.com/docker/docker/layer"
    38  	"github.com/docker/docker/libcontainerd"
    39  	"github.com/docker/docker/migrate/v1"
    40  	"github.com/docker/docker/pkg/fileutils"
    41  	"github.com/docker/docker/pkg/graphdb"
    42  	"github.com/docker/docker/pkg/idtools"
    43  	"github.com/docker/docker/pkg/progress"
    44  	"github.com/docker/docker/pkg/registrar"
    45  	"github.com/docker/docker/pkg/signal"
    46  	"github.com/docker/docker/pkg/streamformatter"
    47  	"github.com/docker/docker/pkg/sysinfo"
    48  	"github.com/docker/docker/pkg/system"
    49  	"github.com/docker/docker/pkg/truncindex"
    50  	"github.com/docker/docker/reference"
    51  	"github.com/docker/docker/registry"
    52  	"github.com/docker/docker/runconfig"
    53  	"github.com/docker/docker/utils"
    54  	volumedrivers "github.com/docker/docker/volume/drivers"
    55  	"github.com/docker/docker/volume/local"
    56  	"github.com/docker/docker/volume/store"
    57  	"github.com/docker/libnetwork"
    58  	nwconfig "github.com/docker/libnetwork/config"
    59  	"github.com/docker/libtrust"
    60  )
    61  
    62  var (
    63  	// DefaultRuntimeBinary is the default runtime to be used by
    64  	// containerd if none is specified
    65  	DefaultRuntimeBinary = "docker-runc"
    66  
    67  	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
    68  )
    69  
    70  // Daemon holds information about the Docker daemon.
    71  type Daemon struct {
    72  	ID                        string
    73  	repository                string
    74  	containers                container.Store
    75  	execCommands              *exec.Store
    76  	referenceStore            reference.Store
    77  	downloadManager           *xfer.LayerDownloadManager
    78  	uploadManager             *xfer.LayerUploadManager
    79  	distributionMetadataStore dmetadata.Store
    80  	trustKey                  libtrust.PrivateKey
    81  	idIndex                   *truncindex.TruncIndex
    82  	configStore               *Config
    83  	statsCollector            *statsCollector
    84  	defaultLogConfig          containertypes.LogConfig
    85  	RegistryService           registry.Service
    86  	EventsService             *events.Events
    87  	netController             libnetwork.NetworkController
    88  	volumes                   *store.VolumeStore
    89  	discoveryWatcher          discoveryReloader
    90  	root                      string
    91  	seccompEnabled            bool
    92  	shutdown                  bool
    93  	uidMaps                   []idtools.IDMap
    94  	gidMaps                   []idtools.IDMap
    95  	layerStore                layer.Store
    96  	imageStore                image.Store
    97  	nameIndex                 *registrar.Registrar
    98  	linkIndex                 *linkIndex
    99  	containerd                libcontainerd.Client
   100  	containerdRemote          libcontainerd.Remote
   101  	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
   102  	clusterProvider           cluster.Provider
   103  }
   104  
   105  func (daemon *Daemon) restore() error {
   106  	var (
   107  		debug         = utils.IsDebugEnabled()
   108  		currentDriver = daemon.GraphDriverName()
   109  		containers    = make(map[string]*container.Container)
   110  	)
   111  
   112  	if !debug {
   113  		logrus.Info("Loading containers: start.")
   114  	}
   115  	dir, err := ioutil.ReadDir(daemon.repository)
   116  	if err != nil {
   117  		return err
   118  	}
   119  
   120  	containerCount := 0
   121  	for _, v := range dir {
   122  		id := v.Name()
   123  		container, err := daemon.load(id)
   124  		if !debug && logrus.GetLevel() == logrus.InfoLevel {
   125  			fmt.Print(".")
   126  			containerCount++
   127  		}
   128  		if err != nil {
   129  			logrus.Errorf("Failed to load container %v: %v", id, err)
   130  			continue
   131  		}
   132  
   133  		// Ignore the container if it was not created with the graph driver currently in use
   134  		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
   135  			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
   136  			if err != nil {
   137  				logrus.Errorf("Failed to load container mount %v: %v", id, err)
   138  				continue
   139  			}
   140  			container.RWLayer = rwlayer
   141  			logrus.Debugf("Loaded container %v", container.ID)
   142  
   143  			containers[container.ID] = container
   144  		} else {
   145  			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
   146  		}
   147  	}
   148  
   149  	var migrateLegacyLinks bool
   150  	removeContainers := make(map[string]*container.Container)
   151  	restartContainers := make(map[*container.Container]chan struct{})
   152  	activeSandboxes := make(map[string]interface{})
   153  	for _, c := range containers {
   154  		if err := daemon.registerName(c); err != nil {
   155  			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
   156  			continue
   157  		}
   158  		if err := daemon.Register(c); err != nil {
   159  			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
   160  			continue
   161  		}
   162  
   163  		// The LogConfig.Type is empty if the container was created before Docker 1.12 with the default log driver.
   164  		// We should rewrite it to use the daemon defaults.
   165  		// Fixes https://github.com/docker/docker/issues/22536
   166  		if c.HostConfig.LogConfig.Type == "" {
   167  			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
   168  				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
   169  				continue
   170  			}
   171  		}
   172  	}
   173  	var wg sync.WaitGroup
   174  	var mapLock sync.Mutex
   175  	for _, c := range containers {
   176  		wg.Add(1)
   177  		go func(c *container.Container) {
   178  			defer wg.Done()
   179  			rm := c.RestartManager(false)
   180  			if c.IsRunning() || c.IsPaused() {
   181  				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
   182  					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
   183  					return
   184  				}
   185  				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
   186  					options, err := daemon.buildSandboxOptions(c)
   187  					if err != nil {
   188  						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
   189  					}
   190  					mapLock.Lock()
   191  					activeSandboxes[c.NetworkSettings.SandboxID] = options
   192  					mapLock.Unlock()
   193  				}
   194  
   195  			}
   196  			// fixme: only if not running
   197  			// get list of containers we need to restart
   198  			if !c.IsRunning() && !c.IsPaused() {
   199  				if daemon.configStore.AutoRestart && c.ShouldRestart() {
   200  					mapLock.Lock()
   201  					restartContainers[c] = make(chan struct{})
   202  					mapLock.Unlock()
   203  				} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
   204  					removeContainers[c.ID] = c
   205  				}
   206  			}
   207  
   208  			if c.RemovalInProgress {
   209  				// We probably crashed in the middle of a removal, reset
   210  				// the flag.
   211  				//
   212  				// We DO NOT remove the container here as we do not
   213  				// know if the user had requested for either the
   214  				// associated volumes, network links or both to also
   215  				// be removed. So we put the container in the "dead"
   216  				// state and leave further processing up to them.
   217  				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
   218  				c.ResetRemovalInProgress()
   219  				c.SetDead()
   220  				c.ToDisk()
   221  			}
   222  
   223  			// if c.HostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
   224  			if c.HostConfig != nil && c.HostConfig.Links == nil {
   225  				migrateLegacyLinks = true
   226  			}
   227  		}(c)
   228  	}
   229  	wg.Wait()
   230  	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
   231  	if err != nil {
   232  		return fmt.Errorf("Error initializing network controller: %v", err)
   233  	}
   234  
   235  	// migrate any legacy links from sqlite
   236  	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
   237  	var legacyLinkDB *graphdb.Database
   238  	if migrateLegacyLinks {
   239  		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
   240  		if err != nil {
   241  			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
   242  		}
   243  		defer legacyLinkDB.Close()
   244  	}
   245  
   246  	// Now that all the containers are registered, register the links
   247  	for _, c := range containers {
   248  		if migrateLegacyLinks {
   249  			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
   250  				return err
   251  			}
   252  		}
   253  		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
   254  			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
   255  		}
   256  	}
   257  
   258  	group := sync.WaitGroup{}
   259  	for c, notifier := range restartContainers {
   260  		group.Add(1)
   261  
   262  		go func(c *container.Container, chNotify chan struct{}) {
   263  			defer group.Done()
   264  
   265  			logrus.Debugf("Starting container %s", c.ID)
   266  
   267  			// ignore errors here as this is a best effort to wait for children to be
   268  			// running before we try to start the container
   269  			children := daemon.children(c)
   270  			timeout := time.After(5 * time.Second)
   271  			for _, child := range children {
   272  				if notifier, exists := restartContainers[child]; exists {
   273  					select {
   274  					case <-notifier:
   275  					case <-timeout:
   276  					}
   277  				}
   278  			}
   279  
   280  			// Make sure networks are available before starting
   281  			daemon.waitForNetworks(c)
   282  			if err := daemon.containerStart(c); err != nil {
   283  				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
   284  			}
   285  			close(chNotify)
   286  		}(c, notifier)
   287  
   288  	}
   289  	group.Wait()
   290  
   291  	removeGroup := sync.WaitGroup{}
   292  	for id := range removeContainers {
   293  		removeGroup.Add(1)
   294  		go func(cid string) {
   295  			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
   296  				logrus.Errorf("Failed to remove container %s: %s", cid, err)
   297  			}
   298  			removeGroup.Done()
   299  		}(id)
   300  	}
   301  	removeGroup.Wait()
   302  
   303  	// Any containers that were started above have already had this done;
   304  	// now we need to prepare the mount points for the rest of the containers as well.
   305  	// Running this again on containers that already had it done is harmless.
   306  	// This must be run after any containers with a restart policy so that containerized plugins
   307  	// can have a chance to be running before we try to initialize them.
   308  	for _, c := range containers {
   309  		// If the container has a restart policy, do not prepare the
   310  		// mount points, since that was already done when it restarted.
   311  		// This speeds up daemon start when a restarting container
   312  		// has a volume and the volume driver is not available.
   313  		if _, ok := restartContainers[c]; ok {
   314  			continue
   315  		} else if _, ok := removeContainers[c.ID]; ok {
   316  			// container is automatically removed, skip it.
   317  			continue
   318  		}
   319  
   320  		group.Add(1)
   321  		go func(c *container.Container) {
   322  			defer group.Done()
   323  			if err := daemon.prepareMountPoints(c); err != nil {
   324  				logrus.Error(err)
   325  			}
   326  		}(c)
   327  	}
   328  
   329  	group.Wait()
   330  
   331  	if !debug {
   332  		if logrus.GetLevel() == logrus.InfoLevel && containerCount > 0 {
   333  			fmt.Println()
   334  		}
   335  		logrus.Info("Loading containers: done.")
   336  	}
   337  
   338  	return nil
   339  }
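
// The restart loop above gates each container's start on its linked
// children: every restarting container gets a notifier channel, parents
// select on each child's channel (or a shared timeout) before starting,
// and each container closes its own channel once started. A minimal
// sketch of the same pattern, detached from the daemon types (the names
// below are illustrative):
//
//	ready := map[string]chan struct{}{"db": make(chan struct{})}
//	timeout := time.After(5 * time.Second)
//	for _, dep := range []string{"db"} {
//		if ch, ok := ready[dep]; ok {
//			select {
//			case <-ch:      // dependency has started
//			case <-timeout: // best effort: stop waiting
//			}
//		}
//	}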
   340  
   341  // waitForNetworks is used during daemon initialization when starting up containers.
   342  // It ensures that all of a container's networks are available before the daemon tries to start the container.
   343  // In practice it just makes sure the discovery service is available for containers that use a network requiring discovery.
   344  func (daemon *Daemon) waitForNetworks(c *container.Container) {
   345  	if daemon.discoveryWatcher == nil {
   346  		return
   347  	}
   348  	// Make sure that, if the container has a network that requires discovery, the discovery service is available before starting
   349  	for netName := range c.NetworkSettings.Networks {
   350  		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
   351  		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
   352  		if _, err := daemon.netController.NetworkByName(netName); err != nil {
   353  			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
   354  				continue
   355  			}
   356  			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
   357  			// FIXME: why is this slow???
   358  			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
   359  			select {
   360  			case <-daemon.discoveryWatcher.ReadyCh():
   361  			case <-time.After(60 * time.Second):
   362  			}
   363  			return
   364  		}
   365  	}
   366  }
   367  
// children returns the child containers of the given container.
   368  func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
   369  	return daemon.linkIndex.children(c)
   370  }
   371  
   372  // parents returns the parent containers of the
   373  // given container.
   374  func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
   375  	return daemon.linkIndex.parents(c)
   376  }
   377  
   378  func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
   379  	fullName := path.Join(parent.Name, alias)
   380  	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
   381  		if err == registrar.ErrNameReserved {
   382  			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
   383  			return nil
   384  		}
   385  		return err
   386  	}
   387  	daemon.linkIndex.link(parent, child, fullName)
   388  	return nil
   389  }
   390  
   391  // SetClusterProvider sets a component for querying the current cluster state.
   392  func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
   393  	daemon.clusterProvider = clusterProvider
   394  	daemon.netController.SetClusterProvider(clusterProvider)
   395  }
   396  
   397  // IsSwarmCompatible verifies if the current daemon
   398  // configuration is compatible with the swarm mode
   399  func (daemon *Daemon) IsSwarmCompatible() error {
   400  	if daemon.configStore == nil {
   401  		return nil
   402  	}
   403  	return daemon.configStore.isSwarmCompatible()
   404  }
   405  
   406  // NewDaemon sets up everything for the daemon to be able to service
   407  // requests from the webserver.
   408  func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
   409  	setDefaultMtu(config)
   410  
   411  	// Ensure that we have a correct root key limit for launching containers.
   412  	if err := ModifyRootKeyLimit(); err != nil {
   413  		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
   414  	}
   415  
   416  	// Ensure we have compatible and valid configuration options
   417  	if err := verifyDaemonSettings(config); err != nil {
   418  		return nil, err
   419  	}
   420  
   421  	// Do we have a disabled network?
   422  	config.DisableBridge = isBridgeNetworkDisabled(config)
   423  
   424  	// Verify the platform is supported as a daemon
   425  	if !platformSupported {
   426  		return nil, errSystemNotSupported
   427  	}
   428  
   429  	// Validate platform-specific requirements
   430  	if err := checkSystem(); err != nil {
   431  		return nil, err
   432  	}
   433  
   434  	// set up the SIGUSR1 handler on Unix-like systems, or a Win32 global event
   435  	// on Windows, to dump goroutine stacks
   436  	setupDumpStackTrap(config.Root)
   437  
   438  	uidMaps, gidMaps, err := setupRemappedRoot(config)
   439  	if err != nil {
   440  		return nil, err
   441  	}
   442  	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
   443  	if err != nil {
   444  		return nil, err
   445  	}
   446  
   447  	// get the canonical path to the Docker root directory
   448  	var realRoot string
   449  	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
   450  		realRoot = config.Root
   451  	} else {
   452  		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
   453  		if err != nil {
   454  			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
   455  		}
   456  	}
   457  
   458  	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
   459  		return nil, err
   460  	}
   461  
   462  	if err := setupDaemonProcess(config); err != nil {
   463  		return nil, err
   464  	}
   465  
   466  	// set up the tmpDir to use a canonical path
   467  	tmp, err := tempDir(config.Root, rootUID, rootGID)
   468  	if err != nil {
   469  		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
   470  	}
   471  	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
   472  	if err != nil {
   473  		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
   474  	}
   475  	os.Setenv("TMPDIR", realTmp)
   476  
   477  	d := &Daemon{configStore: config}
   478  	// Ensure the daemon is properly shut down if there is a failure during
   479  	// initialization
   480  	defer func() {
   481  		if err != nil {
   482  			if err := d.Shutdown(); err != nil {
   483  				logrus.Error(err)
   484  			}
   485  		}
   486  	}()
   487  
   488  	// Set the default isolation mode (only applicable on Windows)
   489  	if err := d.setDefaultIsolation(); err != nil {
   490  		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
   491  	}
   492  
   493  	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
   494  
   495  	if err := configureMaxThreads(config); err != nil {
   496  		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
   497  	}
   498  
   499  	installDefaultAppArmorProfile()
   500  	daemonRepo := filepath.Join(config.Root, "containers")
   501  	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
   502  		return nil, err
   503  	}
   504  
   505  	driverName := os.Getenv("DOCKER_DRIVER")
   506  	if driverName == "" {
   507  		driverName = config.GraphDriver
   508  	}
   509  	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
   510  		StorePath:                 config.Root,
   511  		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
   512  		GraphDriver:               driverName,
   513  		GraphDriverOptions:        config.GraphOptions,
   514  		UIDMaps:                   uidMaps,
   515  		GIDMaps:                   gidMaps,
   516  	})
   517  	if err != nil {
   518  		return nil, err
   519  	}
   520  
   521  	graphDriver := d.layerStore.DriverName()
   522  	imageRoot := filepath.Join(config.Root, "image", graphDriver)
   523  
   524  	// Configure and validate the kernel's security support
   525  	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
   526  		return nil, err
   527  	}
   528  
   529  	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
   530  	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
   531  	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
   532  	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
   533  
   534  	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
   535  	if err != nil {
   536  		return nil, err
   537  	}
   538  
   539  	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
   540  	if err != nil {
   541  		return nil, err
   542  	}
   543  
   544  	// Configure the volumes driver
   545  	volStore, err := d.configureVolumes(rootUID, rootGID)
   546  	if err != nil {
   547  		return nil, err
   548  	}
   549  
   550  	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
   551  	if err != nil {
   552  		return nil, err
   553  	}
   554  
   555  	trustDir := filepath.Join(config.Root, "trust")
   556  
   557  	if err := system.MkdirAll(trustDir, 0700); err != nil {
   558  		return nil, err
   559  	}
   560  
   561  	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
   562  	if err != nil {
   563  		return nil, err
   564  	}
   565  
   566  	eventsService := events.New()
   567  
   568  	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
   569  	if err != nil {
   570  		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
   571  	}
   572  
   573  	migrationStart := time.Now()
   574  	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
   575  		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
   576  	}
   577  	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
   578  
   579  	// Discovery is only enabled when the daemon is launched with an address to advertise. When
   580  	// initialized, the daemon is registered and we can store the discovery backend as it is read-only.
   581  	if err := d.initDiscovery(config); err != nil {
   582  		return nil, err
   583  	}
   584  
   585  	sysInfo := sysinfo.New(false)
   586  	// Check if the devices cgroup is mounted; it is a hard requirement for
   587  	// container security on Linux.
   588  	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
   589  		return nil, fmt.Errorf("Devices cgroup isn't mounted")
   590  	}
   591  
   592  	d.ID = trustKey.PublicKey().KeyID()
   593  	d.repository = daemonRepo
   594  	d.containers = container.NewMemoryStore()
   595  	d.execCommands = exec.NewStore()
   596  	d.referenceStore = referenceStore
   597  	d.distributionMetadataStore = distributionMetadataStore
   598  	d.trustKey = trustKey
   599  	d.idIndex = truncindex.NewTruncIndex([]string{})
   600  	d.statsCollector = d.newStatsCollector(1 * time.Second)
   601  	d.defaultLogConfig = containertypes.LogConfig{
   602  		Type:   config.LogConfig.Type,
   603  		Config: config.LogConfig.Config,
   604  	}
   605  	d.RegistryService = registryService
   606  	d.EventsService = eventsService
   607  	d.volumes = volStore
   608  	d.root = config.Root
   609  	d.uidMaps = uidMaps
   610  	d.gidMaps = gidMaps
   611  	d.seccompEnabled = sysInfo.Seccomp
   612  
   613  	d.nameIndex = registrar.NewRegistrar()
   614  	d.linkIndex = newLinkIndex()
   615  	d.containerdRemote = containerdRemote
   616  
   617  	go d.execCommandGC()
   618  
   619  	d.containerd, err = containerdRemote.Client(d)
   620  	if err != nil {
   621  		return nil, err
   622  	}
   623  
   624  	if err := d.restore(); err != nil {
   625  		return nil, err
   626  	}
   627  
   628  	if err := pluginInit(d, config, containerdRemote); err != nil {
   629  		return nil, err
   630  	}
   631  
   632  	return d, nil
   633  }
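
// A caller typically wires the daemon up along these lines (a minimal
// sketch: config, registryService, and containerdRemote stand for a
// populated *Config, a registry.Service, and a libcontainerd.Remote):
//
//	d, err := NewDaemon(config, registryService, containerdRemote)
//	if err != nil {
//		logrus.Fatalf("Error starting daemon: %v", err)
//	}
//	defer d.Shutdown()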
   634  
   635  func (daemon *Daemon) shutdownContainer(c *container.Container) error {
   636  	// TODO(windows): Handle docker restart with paused containers
   637  	if c.IsPaused() {
   638  		// To terminate a process in a freezer cgroup, we should send
   639  		// SIGTERM to the process and then unfreeze it; the process will
   640  		// then be forced to terminate immediately.
   641  		logrus.Debugf("Container %s is paused, sending SIGTERM before unpausing it", c.ID)
   642  		sig, ok := signal.SignalMap["TERM"]
   643  		if !ok {
   644  			return fmt.Errorf("System does not support SIGTERM")
   645  		}
   646  		if err := daemon.kill(c, int(sig)); err != nil {
   647  			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
   648  		}
   649  		if err := daemon.containerUnpause(c); err != nil {
   650  			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
   651  		}
   652  		if _, err := c.WaitStop(10 * time.Second); err != nil {
   653  			logrus.Debugf("container %s failed to exit within 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
   654  			sig, ok := signal.SignalMap["KILL"]
   655  			if !ok {
   656  				return fmt.Errorf("System does not support SIGKILL")
   657  			}
   658  			if err := daemon.kill(c, int(sig)); err != nil {
   659  				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
   660  			}
   661  			c.WaitStop(-1 * time.Second)
   662  			return err
   663  		}
   664  	}
   665  	// If the container failed to exit within 10 seconds of SIGTERM, then use the force
   666  	if err := daemon.containerStop(c, 10); err != nil {
   667  		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
   668  	}
   669  
   670  	c.WaitStop(-1 * time.Second)
   671  	return nil
   672  }
   673  
   674  // Shutdown stops the daemon.
   675  func (daemon *Daemon) Shutdown() error {
   676  	daemon.shutdown = true
   677  	// Keep mounts and networking running on daemon shutdown if
   678  	// we are to keep containers running and restore them.
   679  
   680  	pluginShutdown()
   681  
   682  	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
   683  		// check if there are any running containers; if none, we should do some cleanup
   684  		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
   685  			return nil
   686  		}
   687  	}
   688  
   689  	if daemon.containers != nil {
   690  		logrus.Debug("starting clean shutdown of all containers...")
   691  		daemon.containers.ApplyAll(func(c *container.Container) {
   692  			if !c.IsRunning() {
   693  				return
   694  			}
   695  			logrus.Debugf("stopping %s", c.ID)
   696  			if err := daemon.shutdownContainer(c); err != nil {
   697  				logrus.Errorf("Stop container error: %v", err)
   698  				return
   699  			}
   700  			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
   701  				daemon.cleanupMountsByID(mountid)
   702  			}
   703  			logrus.Debugf("container stopped %s", c.ID)
   704  		})
   705  	}
   706  
   707  	// trigger libnetwork Stop only if it's initialized
   708  	if daemon.netController != nil {
   709  		daemon.netController.Stop()
   710  	}
   711  
   712  	if daemon.layerStore != nil {
   713  		if err := daemon.layerStore.Cleanup(); err != nil {
   714  			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
   715  		}
   716  	}
   717  
   718  	if err := daemon.cleanupMounts(); err != nil {
   719  		return err
   720  	}
   721  
   722  	return nil
   723  }
   724  
   725  // Mount sets container.BaseFS
   726  // (is it not set coming in? why is it unset?)
   727  func (daemon *Daemon) Mount(container *container.Container) error {
   728  	dir, err := container.RWLayer.Mount(container.GetMountLabel())
   729  	if err != nil {
   730  		return err
   731  	}
   732  	logrus.Debugf("container mounted via layerStore: %v", dir)
   733  
   734  	if container.BaseFS != dir {
   735  		// The mount path reported by the graph driver should always be trusted on Windows, since the
   736  		// volume path for a given mounted layer may change over time.  This should only be an error
   737  		// on non-Windows operating systems.
   738  		if container.BaseFS != "" && runtime.GOOS != "windows" {
   739  			daemon.Unmount(container)
   740  			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
   741  				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
   742  		}
   743  	}
   744  	container.BaseFS = dir // TODO: combine these fields
   745  	return nil
   746  }
   747  
   748  // Unmount unsets the container base filesystem
   749  func (daemon *Daemon) Unmount(container *container.Container) error {
   750  	if err := container.RWLayer.Unmount(); err != nil {
   751  		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
   752  		return err
   753  	}
   754  	return nil
   755  }
   756  
   757  // V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
   758  func (daemon *Daemon) V4Subnets() []net.IPNet {
   759  	var subnets []net.IPNet
   760  
   761  	managedNetworks := daemon.netController.Networks()
   762  
   763  	for _, managedNetwork := range managedNetworks {
   764  		v4Infos, _ := managedNetwork.Info().IpamInfo()
   765  		for _, v4Info := range v4Infos {
   766  			if v4Info.IPAMData.Pool != nil {
   767  				subnets = append(subnets, *v4Info.IPAMData.Pool)
   768  			}
   769  		}
   770  	}
   771  
   772  	return subnets
   773  }
   774  
   775  // V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
   776  func (daemon *Daemon) V6Subnets() []net.IPNet {
   777  	var subnets []net.IPNet
   778  
   779  	managedNetworks := daemon.netController.Networks()
   780  
   781  	for _, managedNetwork := range managedNetworks {
   782  		_, v6Infos := managedNetwork.Info().IpamInfo()
   783  		for _, v6Info := range v6Infos {
   784  			if v6Info.IPAMData.Pool != nil {
   785  				subnets = append(subnets, *v6Info.IPAMData.Pool)
   786  			}
   787  		}
   788  	}
   789  
   790  	return subnets
   791  }
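
// Both helpers are convenient when configuring firewall or routing rules
// for Docker-managed networks. An illustrative use (d stands for a
// *Daemon):
//
//	for _, subnet := range d.V4Subnets() {
//		logrus.Debugf("Docker-managed IPv4 subnet: %s", subnet.String())
//	}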
   792  
   793  func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
   794  	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
   795  	operationCancelled := false
   796  
   797  	for prog := range progressChan {
   798  		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
   799  			// don't log broken pipe errors as this is the normal case when a client aborts
   800  			if isBrokenPipe(err) {
   801  				logrus.Info("Pull session cancelled")
   802  			} else {
   803  				logrus.Errorf("error writing progress to client: %v", err)
   804  			}
   805  			cancelFunc()
   806  			operationCancelled = true
   807  			// Don't return, because we need to continue draining
   808  			// progressChan until it's closed to avoid a deadlock.
   809  		}
   810  	}
   811  }
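
// An illustrative wiring (outStream stands for the client connection;
// golang.org/x/net/context is assumed for cancellation): the producer
// sends progress.Progress values and closes the channel when done, while
// cancelFunc aborts the transfer if the client goes away:
//
//	progressChan := make(chan progress.Progress, 100)
//	ctx, cancel := context.WithCancel(context.Background())
//	go writeDistributionProgress(cancel, outStream, progressChan)
//	// ... run the pull or push with ctx, then close(progressChan)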
   812  
   813  func isBrokenPipe(e error) bool {
   814  	if netErr, ok := e.(*net.OpError); ok {
   815  		e = netErr.Err
   816  		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
   817  			e = sysErr.Err
   818  		}
   819  	}
   820  	return e == syscall.EPIPE
   821  }
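
// A broken pipe from a hung-up client arrives as a *net.OpError wrapping
// an *os.SyscallError around syscall.EPIPE; this helper unwraps both
// layers. An illustrative check (outStream and buf are placeholders):
//
//	if _, err := outStream.Write(buf); err != nil && isBrokenPipe(err) {
//		// the client aborted; stop writing
//	}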
   822  
   823  // GraphDriverName returns the name of the graph driver used by the layer.Store
   824  func (daemon *Daemon) GraphDriverName() string {
   825  	return daemon.layerStore.DriverName()
   826  }
   827  
   828  // GetUIDGIDMaps returns the current daemon's user namespace settings
   829  // for the full uid and gid maps which will be applied to containers
   830  // started in this instance.
   831  func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
   832  	return daemon.uidMaps, daemon.gidMaps
   833  }
   834  
   835  // GetRemappedUIDGID returns the current daemon's uid and gid values
   836  // if user namespaces are in use for this daemon instance. If not,
   837  // this function will return the "real" root values of 0, 0.
   838  func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
   839  	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
   840  	return uid, gid
   841  }
   842  
   843  // tempDir returns the default directory to use for temporary files.
   844  func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
   845  	var tmpDir string
   846  	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
   847  		tmpDir = filepath.Join(rootDir, "tmp")
   848  	}
   849  	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
   850  }
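
// The temporary directory can be redirected through the environment, for
// example (illustrative; assumes the dockerd entrypoint):
//
//	DOCKER_TMPDIR=/mnt/big/tmp dockerd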
   851  
   852  func (daemon *Daemon) setupInitLayer(initPath string) error {
   853  	rootUID, rootGID := daemon.GetRemappedUIDGID()
   854  	return setupInitLayer(initPath, rootUID, rootGID)
   855  }
   856  
   857  func setDefaultMtu(config *Config) {
   858  	// do nothing if the MTU was set explicitly (i.e. the config does not have the default 0 value).
   859  	if config.Mtu != 0 {
   860  		return
   861  	}
   862  	config.Mtu = defaultNetworkMtu
   863  }
   864  
   865  func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
   866  	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
   867  	if err != nil {
   868  		return nil, err
   869  	}
   870  
   871  	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
   872  		return nil, fmt.Errorf("local volume driver could not be registered")
   873  	}
   874  	return store.New(daemon.configStore.Root)
   875  }
   876  
   877  // IsShuttingDown tells whether the daemon is shutting down or not
   878  func (daemon *Daemon) IsShuttingDown() bool {
   879  	return daemon.shutdown
   880  }
   881  
   882  // initDiscovery initializes the discovery watcher for this daemon.
   883  func (daemon *Daemon) initDiscovery(config *Config) error {
   884  	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
   885  	if err != nil {
   886  		if err == errDiscoveryDisabled {
   887  			return nil
   888  		}
   889  		return err
   890  	}
   891  
   892  	config.ClusterAdvertise = advertise
   893  	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
   894  	if err != nil {
   895  		return fmt.Errorf("discovery initialization failed (%v)", err)
   896  	}
   897  
   898  	daemon.discoveryWatcher = discoveryWatcher
   899  	return nil
   900  }
   901  
   902  // Reload reads configuration changes and modifies the
   903  // daemon according to those changes.
   904  // These are the settings that Reload changes:
   905  // - Daemon labels.
   906  // - Daemon debug log level.
   907  // - Daemon max concurrent downloads.
   908  // - Daemon max concurrent uploads.
   909  // - Cluster discovery (reconfigure and restart).
   910  // - Daemon live restore.
   911  func (daemon *Daemon) Reload(config *Config) error {
   912  	var err error
   913  	// used to hold reloaded changes
   914  	attributes := map[string]string{}
   915  
   916  	// We need defer here to ensure the lock is released as
   917  	// daemon.SystemInfo() will try to get it too
   918  	defer func() {
   919  		if err == nil {
   920  			daemon.LogDaemonEventWithAttributes("reload", attributes)
   921  		}
   922  	}()
   923  
   924  	daemon.configStore.reloadLock.Lock()
   925  	defer daemon.configStore.reloadLock.Unlock()
   926  
   927  	daemon.platformReload(config, &attributes)
   928  
   929  	if err = daemon.reloadClusterDiscovery(config); err != nil {
   930  		return err
   931  	}
   932  
   933  	if config.IsValueSet("labels") {
   934  		daemon.configStore.Labels = config.Labels
   935  	}
   936  	if config.IsValueSet("debug") {
   937  		daemon.configStore.Debug = config.Debug
   938  	}
   939  	if config.IsValueSet("live-restore") {
   940  		daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
   941  		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
   942  			return err
   943  		}
   944  
   945  	}
   946  
   947  	// If no value is set for max-concurrent-downloads, we assume it is the default value.
   948  	// We always "reset" as the cost is lightweight and easy to maintain.
   949  	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
   950  		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
   951  	} else {
   952  		maxConcurrentDownloads := defaultMaxConcurrentDownloads
   953  		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
   954  	}
   955  	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
   956  	if daemon.downloadManager != nil {
   957  		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
   958  	}
   959  
   960  	// If no value is set for max-concurrent-uploads, we assume it is the default value.
   961  	// We always "reset" as the cost is lightweight and easy to maintain.
   962  	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
   963  		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
   964  	} else {
   965  		maxConcurrentUploads := defaultMaxConcurrentUploads
   966  		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
   967  	}
   968  	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
   969  	if daemon.uploadManager != nil {
   970  		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
   971  	}
   972  
   973  	// We emit the daemon reload event here with the updatable configurations
   974  	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
   975  	attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)
   976  	attributes["cluster-store"] = daemon.configStore.ClusterStore
   977  	if daemon.configStore.ClusterOpts != nil {
   978  		opts, _ := json.Marshal(daemon.configStore.ClusterOpts)
   979  		attributes["cluster-store-opts"] = string(opts)
   980  	} else {
   981  		attributes["cluster-store-opts"] = "{}"
   982  	}
   983  	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
   984  	if daemon.configStore.Labels != nil {
   985  		labels, _ := json.Marshal(daemon.configStore.Labels)
   986  		attributes["labels"] = string(labels)
   987  	} else {
   988  		attributes["labels"] = "[]"
   989  	}
   990  	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
   991  	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
   992  
   993  	return nil
   994  }
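
// Reload is typically invoked with a Config produced by re-parsing the
// daemon's configuration file, since only fields reported by
// config.IsValueSet(...) are applied. A minimal sketch of the call shape
// (newConfig stands for such a re-parsed configuration):
//
//	if err := daemon.Reload(newConfig); err != nil {
//		logrus.Error(err)
//	}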
   995  
   996  func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
   997  	var err error
   998  	newAdvertise := daemon.configStore.ClusterAdvertise
   999  	newClusterStore := daemon.configStore.ClusterStore
  1000  	if config.IsValueSet("cluster-advertise") {
  1001  		if config.IsValueSet("cluster-store") {
  1002  			newClusterStore = config.ClusterStore
  1003  		}
  1004  		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
  1005  		if err != nil && err != errDiscoveryDisabled {
  1006  			return err
  1007  		}
  1008  	}
  1009  
  1010  	if daemon.clusterProvider != nil {
  1011  		if err := config.isSwarmCompatible(); err != nil {
  1012  			return err
  1013  		}
  1014  	}
  1015  
  1016  	// check discovery modifications
  1017  	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
  1018  		return nil
  1019  	}
  1020  
  1021  	// enable discovery for the first time if it was not previously enabled
  1022  	if daemon.discoveryWatcher == nil {
  1023  		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
  1024  		if err != nil {
  1025  			return fmt.Errorf("discovery initialization failed (%v)", err)
  1026  		}
  1027  		daemon.discoveryWatcher = discoveryWatcher
  1028  	} else {
  1029  		if err == errDiscoveryDisabled {
  1030  			// disable discovery if it was previously enabled and it's disabled now
  1031  			daemon.discoveryWatcher.Stop()
  1032  		} else {
  1033  			// reload discovery
  1034  			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
  1035  				return err
  1036  			}
  1037  		}
  1038  	}
  1039  
  1040  	daemon.configStore.ClusterStore = newClusterStore
  1041  	daemon.configStore.ClusterOpts = config.ClusterOpts
  1042  	daemon.configStore.ClusterAdvertise = newAdvertise
  1043  
  1044  	if daemon.netController == nil {
  1045  		return nil
  1046  	}
  1047  	netOptions, err := daemon.networkOptions(daemon.configStore, nil)
  1048  	if err != nil {
  1049  		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
  1050  		return nil
  1051  	}
  1052  	err = daemon.netController.ReloadConfiguration(netOptions...)
  1053  	if err != nil {
  1054  		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
  1055  	}
  1056  
  1057  	return nil
  1058  }
  1059  
  1060  func isBridgeNetworkDisabled(config *Config) bool {
  1061  	return config.bridgeConfig.Iface == disableNetworkBridge
  1062  }
  1063  
  1064  func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
  1065  	options := []nwconfig.Option{}
  1066  	if dconfig == nil {
  1067  		return options, nil
  1068  	}
  1069  
  1070  	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
  1071  	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
  1072  
  1073  	dd := runconfig.DefaultDaemonNetworkMode()
  1074  	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
  1075  	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
  1076  	options = append(options, nwconfig.OptionDefaultNetwork(dn))
  1077  
  1078  	if strings.TrimSpace(dconfig.ClusterStore) != "" {
  1079  		kv := strings.Split(dconfig.ClusterStore, "://")
  1080  		if len(kv) != 2 {
  1081  			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
  1082  		}
  1083  		options = append(options, nwconfig.OptionKVProvider(kv[0]))
  1084  		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
  1085  	}
  1086  	if len(dconfig.ClusterOpts) > 0 {
  1087  		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
  1088  	}
  1089  
  1090  	if daemon.discoveryWatcher != nil {
  1091  		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
  1092  	}
  1093  
  1094  	if dconfig.ClusterAdvertise != "" {
  1095  		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
  1096  	}
  1097  
  1098  	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
  1099  	options = append(options, driverOptions(dconfig)...)
  1100  
  1101  	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
  1102  		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
  1103  	}
  1104  
  1105  	return options, nil
  1106  }
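
// The cluster store string is split on "://" above, so a well-formed
// value has the shape KV-PROVIDER://KV-URL, for example (the provider
// and address are illustrative):
//
//	dockerd --cluster-store=consul://127.0.0.1:8500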
  1107  
  1108  func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
  1109  	out := make([]types.BlkioStatEntry, len(entries))
  1110  	for i, re := range entries {
  1111  		out[i] = types.BlkioStatEntry{
  1112  			Major: re.Major,
  1113  			Minor: re.Minor,
  1114  			Op:    re.Op,
  1115  			Value: re.Value,
  1116  		}
  1117  	}
  1118  	return out
  1119  }