github.com/DaoCloud/dao@v0.0.0-20161212064103-c3dbfd13ee36/daemon/daemon.go

// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	containerd "github.com/docker/containerd/api/grpc/types"
	"github.com/docker/docker/api"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/engine-api/types"
	containertypes "github.com/docker/engine-api/types/container"
	"github.com/docker/libnetwork/cluster"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/graphdb"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/registrar"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/utils"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
)

var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID string
	repository string
	containers container.Store
	execCommands *exec.Store
	referenceStore reference.Store
	downloadManager *xfer.LayerDownloadManager
	uploadManager *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey libtrust.PrivateKey
	idIndex *truncindex.TruncIndex
	configStore *Config
	statsCollector *statsCollector
	defaultLogConfig containertypes.LogConfig
	RegistryService registry.Service
	EventsService *events.Events
	netController libnetwork.NetworkController
	volumes *store.VolumeStore
	discoveryWatcher discoveryReloader
	root string
	seccompEnabled bool
	shutdown bool
	uidMaps []idtools.IDMap
	gidMaps []idtools.IDMap
	layerStore layer.Store
	imageStore image.Store
	nameIndex *registrar.Registrar
	linkIndex *linkIndex
	containerd libcontainerd.Client
	containerdRemote libcontainerd.Remote
	defaultIsolation containertypes.Isolation // Default isolation mode on Windows
	clusterProvider cluster.Provider
}

// restore loads containers from disk, registers them with the daemon,
// reconnects running containers to containerd, and restarts containers
// that have a restart policy.
func (daemon *Daemon) restore() error {
	var (
		debug = utils.IsDebugEnabled()
		currentDriver = daemon.GraphDriverName()
		containers = make(map[string]*container.Container)
	)

	if !debug {
		logrus.Info("Loading containers: start.")
	}
	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if !debug && logrus.GetLevel() == logrus.InfoLevel {
			fmt.Print(".")
		}
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	var migrateLegacyLinks bool
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for _, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}
	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			rm := c.RestartManager(false)
			if c.IsRunning() || c.IsPaused() {
				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
					return
				}
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}

			}
			// fixme: only if not running
			// get list of containers we need to restart
			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.ResetRemovalInProgress()
				c.SetDead()
				c.ToDisk()
			}

			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				migrateLegacyLinks = true
			}
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// migrate any legacy links from sqlite
	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
	var legacyLinkDB *graphdb.Database
	if migrateLegacyLinks {
		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
		if err != nil {
			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
		}
		defer legacyLinkDB.Close()
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if migrateLegacyLinks {
			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
				return err
			}
		}
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has a restart policy, do not
		// prepare the mountpoints since it has been done on restarting.
		// This is to speed up the daemon start when a restarted container
		// has a volume and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		}
		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	if !debug {
		if logrus.GetLevel() == logrus.InfoLevel {
			fmt.Println()
		}
		logrus.Info("Loading containers: done.")
	}

	return nil
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use networks that require discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
		if err == registrar.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.isSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	setupDumpStackTrap()

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return nil, err
	}

	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{configStore: config}
	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	installDefaultAppArmorProfile()
	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}
	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath: config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver: driverName,
		GraphDriverOptions: config.GraphOptions,
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) 509 d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) 510 511 ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) 512 if err != nil { 513 return nil, err 514 } 515 516 d.imageStore, err = image.NewImageStore(ifs, d.layerStore) 517 if err != nil { 518 return nil, err 519 } 520 521 // Configure the volumes driver 522 volStore, err := d.configureVolumes(rootUID, rootGID) 523 if err != nil { 524 return nil, err 525 } 526 527 trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) 528 if err != nil { 529 return nil, err 530 } 531 532 trustDir := filepath.Join(config.Root, "trust") 533 534 if err := system.MkdirAll(trustDir, 0700); err != nil { 535 return nil, err 536 } 537 538 distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) 539 if err != nil { 540 return nil, err 541 } 542 543 eventsService := events.New() 544 545 referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) 546 if err != nil { 547 return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) 548 } 549 550 if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil { 551 return nil, fmt.Errorf("Couldn't restore custom images: %s", err) 552 } 553 554 migrationStart := time.Now() 555 if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { 556 logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) 557 } 558 logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) 559 560 // Discovery is only enabled when the daemon is launched with an address to advertise. When 561 // initialized, the daemon is registered and we can store the discovery backend as its read-only 562 if err := d.initDiscovery(config); err != nil { 563 return nil, err 564 } 565 566 sysInfo := sysinfo.New(false) 567 // Check if Devices cgroup is mounted, it is hard requirement for container security, 568 // on Linux. 
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type: config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.RegistryService = registryService
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()
	d.containerdRemote = containerdRemote

	go d.execCommandGC()

	d.containerd, err = containerdRemote.Client(d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}

	// Plugin system initialization should happen before restore. Do not change order.
	if err := pluginInit(d, config, containerdRemote); err != nil {
		return nil, err
	}

	return d, nil
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	// TODO(windows): Handle docker restart with paused containers
	if c.IsPaused() {
		// To terminate a process in a freezer cgroup, we should send
		// SIGTERM to the process, then unfreeze it; the process will
		// then be forced to terminate immediately.
		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
		sig, ok := signal.SignalMap["TERM"]
		if !ok {
			return fmt.Errorf("System does not support SIGTERM")
		}
		if err := daemon.kill(c, int(sig)); err != nil {
			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
		}
		if err := daemon.containerUnpause(c); err != nil {
			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
		}
		if _, err := c.WaitStop(10 * time.Second); err != nil {
			logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
			sig, ok := signal.SignalMap["KILL"]
			if !ok {
				return fmt.Errorf("System does not support SIGKILL")
			}
			if err := daemon.kill(c, int(sig)); err != nil {
				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
			}
			c.WaitStop(-1 * time.Second)
			return err
		}
	}
	// If the container does not exit within 10 seconds of SIGTERM, force-stop it
	if err := daemon.containerStop(c, 10); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	c.WaitStop(-1 * time.Second)
	return nil
}

// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	pluginShutdown()

	if daemon.configStore.LiveRestore && daemon.containers != nil {
		// check if there are any running containers, if none we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debug("starting clean shutdown of all containers...")
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if daemon.layerStore != nil {
		if err := daemon.layerStore.Cleanup(); err != nil {
			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
		}
	}

	if err := daemon.cleanupMounts(); err != nil {
		return err
	}

	return nil
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}
	return nil
}

// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4Infos, _ := managedNetwork.Info().IpamInfo()
		for _, v4Info := range v4Infos {
			if v4Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v4Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		_, v6Infos := managedNetwork.Info().IpamInfo()
		for _, v6Info := range v6Infos {
			if v6Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v6Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// writeDistributionProgress streams progress messages from progressChan to
// outStream as JSON, cancelling the operation if writing to the client fails.
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
	operationCancelled := false

	for prog := range progressChan {
		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
			// don't log broken pipe errors as this is the normal case when a client aborts
			if isBrokenPipe(err) {
				logrus.Info("Pull session cancelled")
			} else {
				logrus.Errorf("error writing progress to client: %v", err)
			}
			cancelFunc()
			operationCancelled = true
			// Don't return, because we need to continue draining
			// progressChan until it's closed to avoid a deadlock.
		}
	}
}

// isBrokenPipe reports whether e is, or wraps, a broken-pipe (EPIPE) error.
func isBrokenPipe(e error) bool {
	if netErr, ok := e.(*net.OpError); ok {
		e = netErr.Err
		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
			e = sysErr.Err
		}
	}
	return e == syscall.EPIPE
}

// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
	return daemon.layerStore.DriverName()
}

// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
	return daemon.uidMaps, daemon.gidMaps
}

// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	return uid, gid
}

// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
	}
	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}

func (daemon *Daemon) setupInitLayer(initPath string) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	return setupInitLayer(initPath, rootUID, rootGID)
}

func setDefaultMtu(config *Config) {
	// do nothing if the config does not have the default 0 value.
	if config.Mtu != 0 {
		return
	}
	config.Mtu = defaultNetworkMtu
}

func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, fmt.Errorf("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
	if err != nil {
		if err == errDiscoveryDisabled {
			return nil
		}
		return err
	}

	config.ClusterAdvertise = advertise
	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels.
// - Daemon debug log level.
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Cluster discovery (reconfigure and restart).
// - Daemon live restore
func (daemon *Daemon) Reload(config *Config) error {
	var err error
	// used to hold reloaded changes
	attributes := map[string]string{}

	// We need defer here to ensure the lock is released as
	// daemon.SystemInfo() will try to get it too
	defer func() {
		if err == nil {
			daemon.LogDaemonEventWithAttributes("reload", attributes)
		}
	}()

	daemon.configStore.reloadLock.Lock()
	defer daemon.configStore.reloadLock.Unlock()

	daemon.platformReload(config, &attributes)

	if err = daemon.reloadClusterDiscovery(config); err != nil {
		return err
	}

	if config.IsValueSet("labels") {
		daemon.configStore.Labels = config.Labels
	}
	if config.IsValueSet("debug") {
		daemon.configStore.Debug = config.Debug
	}
	if config.IsValueSet("live-restore") {
		daemon.configStore.LiveRestore = config.LiveRestore
		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestore)); err != nil {
			return err
		}

	}

	// If no value is set for max-concurrent-downloads we assume it is the default value
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
	} else {
		maxConcurrentDownloads := defaultMaxConcurrentDownloads
		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
	}
	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
	if daemon.downloadManager != nil {
		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
	}

	// If no value is set for max-concurrent-uploads we assume it is the default value
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
	} else {
		maxConcurrentUploads := defaultMaxConcurrentUploads
		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
	}
	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
	if daemon.uploadManager != nil {
		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
	}

	// We emit daemon reload event here with updatable configurations
	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
	attributes["cluster-store"] = daemon.configStore.ClusterStore
	if daemon.configStore.ClusterOpts != nil {
		opts, _ := json.Marshal(daemon.configStore.ClusterOpts)
		attributes["cluster-store-opts"] = string(opts)
	} else {
		attributes["cluster-store-opts"] = "{}"
	}
	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
	if daemon.configStore.Labels != nil {
		labels, _ := json.Marshal(daemon.configStore.Labels)
		attributes["labels"] = string(labels)
	} else {
		attributes["labels"] = "[]"
	}
	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)

	return nil
}

func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
	var err error
	newAdvertise := daemon.configStore.ClusterAdvertise
	newClusterStore := daemon.configStore.ClusterStore
	if config.IsValueSet("cluster-advertise") {
		if config.IsValueSet("cluster-store") {
			newClusterStore = config.ClusterStore
		}
		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
		if err != nil && err != errDiscoveryDisabled {
			return err
		}
	}

	if daemon.clusterProvider != nil {
		if err := config.isSwarmCompatible(); err != nil {
			return err
		}
	}

	// check discovery modifications
	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
		return nil
	}

	// enable discovery for the first time if it was not previously enabled
	if daemon.discoveryWatcher == nil {
		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
		if err != nil {
			return fmt.Errorf("discovery initialization failed (%v)", err)
		}
		daemon.discoveryWatcher = discoveryWatcher
	} else {
		if err == errDiscoveryDisabled {
			// disable discovery if it was previously enabled and it's disabled now
			daemon.discoveryWatcher.Stop()
		} else {
			// reload discovery
			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
				return err
			}
		}
	}

	daemon.configStore.ClusterStore = newClusterStore
	daemon.configStore.ClusterOpts = config.ClusterOpts
	daemon.configStore.ClusterAdvertise = newAdvertise

	if daemon.netController == nil {
		return nil
	}
	netOptions, err := daemon.networkOptions(daemon.configStore, nil)
	if err != nil {
		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
		return nil
	}
	err = daemon.netController.ReloadConfiguration(netOptions...)
	if err != nil {
		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
	}

	return nil
}

func isBridgeNetworkDisabled(config *Config) bool {
	return config.bridgeConfig.Iface == disableNetworkBridge
}

func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestore && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	return options, nil
}

func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	out := make([]types.BlkioStatEntry, len(entries))
	for i, re := range entries {
		out[i] = types.BlkioStatEntry{
			Major: re.Major,
			Minor: re.Minor,
			Op: re.Op,
			Value: re.Value,
		}
	}
	return out
}