github.com/noxiouz/docker@v0.7.3-0.20160629055221-3d231c78e8c5/daemon/daemon.go (about) 1 // Package daemon exposes the functions that occur on the host server 2 // that the Docker daemon is running. 3 // 4 // In implementing the various functions of the daemon, there is often 5 // a method-specific struct for configuring the runtime behavior. 6 package daemon 7 8 import ( 9 "encoding/json" 10 "fmt" 11 "io" 12 "io/ioutil" 13 "net" 14 "os" 15 "path" 16 "path/filepath" 17 "runtime" 18 "strings" 19 "sync" 20 "syscall" 21 "time" 22 23 "github.com/Sirupsen/logrus" 24 containerd "github.com/docker/containerd/api/grpc/types" 25 "github.com/docker/docker/api" 26 "github.com/docker/docker/container" 27 "github.com/docker/docker/daemon/events" 28 "github.com/docker/docker/daemon/exec" 29 "github.com/docker/engine-api/types" 30 containertypes "github.com/docker/engine-api/types/container" 31 "github.com/docker/libnetwork/cluster" 32 // register graph drivers 33 _ "github.com/docker/docker/daemon/graphdriver/register" 34 dmetadata "github.com/docker/docker/distribution/metadata" 35 "github.com/docker/docker/distribution/xfer" 36 "github.com/docker/docker/image" 37 "github.com/docker/docker/layer" 38 "github.com/docker/docker/libcontainerd" 39 "github.com/docker/docker/migrate/v1" 40 "github.com/docker/docker/pkg/fileutils" 41 "github.com/docker/docker/pkg/graphdb" 42 "github.com/docker/docker/pkg/idtools" 43 "github.com/docker/docker/pkg/progress" 44 "github.com/docker/docker/pkg/registrar" 45 "github.com/docker/docker/pkg/signal" 46 "github.com/docker/docker/pkg/streamformatter" 47 "github.com/docker/docker/pkg/sysinfo" 48 "github.com/docker/docker/pkg/system" 49 "github.com/docker/docker/pkg/truncindex" 50 "github.com/docker/docker/reference" 51 "github.com/docker/docker/registry" 52 "github.com/docker/docker/runconfig" 53 "github.com/docker/docker/utils" 54 volumedrivers "github.com/docker/docker/volume/drivers" 55 "github.com/docker/docker/volume/local" 56 
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
)

var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                        string
	repository                string // on-disk directory holding per-container state dirs
	containers                container.Store
	execCommands              *exec.Store
	referenceStore            reference.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *Config
	statsCollector            *statsCollector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	discoveryWatcher          discoveryReloader
	root                      string
	seccompEnabled            bool
	shutdown                  bool
	uidMaps                   []idtools.IDMap
	gidMaps                   []idtools.IDMap
	layerStore                layer.Store
	imageStore                image.Store
	nameIndex                 *registrar.Registrar
	linkIndex                 *linkIndex
	containerd                libcontainerd.Client
	containerdRemote          libcontainerd.Remote
	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
	clusterProvider           cluster.Provider
}

// restore reloads the containers found on disk under daemon.repository,
// re-registers them with the daemon, restores running/paused state via
// containerd, migrates legacy sqlite links when needed, and (re)starts
// containers whose restart policy requires it. It is called once during
// daemon startup, from NewDaemon.
func (daemon *Daemon) restore() error {
	var (
		debug         = utils.IsDebugEnabled()
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	if !debug {
		logrus.Info("Loading containers: start.")
	}
	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	containerCount := 0
	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		// Print a progress dot per container when running at the default
		// Info level; a matching newline is emitted at the end of restore.
		if !debug && logrus.GetLevel() == logrus.InfoLevel {
			fmt.Print(".")
			containerCount++
		}
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	var migrateLegacyLinks bool
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for _, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			continue
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}
	// Restore per-container runtime state in parallel; mapLock guards the
	// shared activeSandboxes and restartContainers maps.
	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			rm := c.RestartManager(false)
			if c.IsRunning() || c.IsPaused() {
				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
					logrus.Errorf("Failed to restore with containerd: %q", err)
					return
				}
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
					}
					// NOTE(review): options is stored even when
					// buildSandboxOptions failed (it may be nil here) —
					// confirm this is intended best-effort behavior.
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}

			}
			// fixme: only if not running
			// get list of containers we need to restart
			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.ResetRemovalInProgress()
				c.SetDead()
				c.ToDisk() // best effort; error deliberately ignored here
			}

			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
			// NOTE(review): migrateLegacyLinks is written from multiple
			// goroutines without holding mapLock — looks like a data race;
			// verify with -race and consider guarding the write.
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				migrateLegacyLinks = true
			}
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// migrate any legacy links from sqlite
	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
	var legacyLinkDB *graphdb.Database
	if migrateLegacyLinks {
		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
		if err != nil {
			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
		}
		defer legacyLinkDB.Close()
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if migrateLegacyLinks {
			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
				return err
			}
		}
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	// Start containers flagged for restart, in parallel. Each goroutine
	// waits (bounded by a shared 5s timeout) for the container's linked
	// children to come up first, then signals its own channel on completion.
	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has restart policy, do not
		// prepare the mountpoints since it has been done on restarting.
		// This is to speed up the daemon start when a restart container
		// has a volume and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		}
		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	if !debug {
		if logrus.GetLevel() == logrus.InfoLevel && containerCount > 0 {
			fmt.Println()
		}
		logrus.Info("Loading containers: done.")
	}

	return nil
}

// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that require discovery.
323 func (daemon *Daemon) waitForNetworks(c *container.Container) { 324 if daemon.discoveryWatcher == nil { 325 return 326 } 327 // Make sure if the container has a network that requires discovery that the discovery service is available before starting 328 for netName := range c.NetworkSettings.Networks { 329 // If we get `ErrNoSuchNetwork` here, it can assumed that it is due to discovery not being ready 330 // Most likely this is because the K/V store used for discovery is in a container and needs to be started 331 if _, err := daemon.netController.NetworkByName(netName); err != nil { 332 if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { 333 continue 334 } 335 // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host 336 // FIXME: why is this slow??? 337 logrus.Debugf("Container %s waiting for network to be ready", c.Name) 338 select { 339 case <-daemon.discoveryWatcher.ReadyCh(): 340 case <-time.After(60 * time.Second): 341 } 342 return 343 } 344 } 345 } 346 347 func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { 348 return daemon.linkIndex.children(c) 349 } 350 351 // parents returns the names of the parent containers of the container 352 // with the given name. 
353 func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { 354 return daemon.linkIndex.parents(c) 355 } 356 357 func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { 358 fullName := path.Join(parent.Name, alias) 359 if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { 360 if err == registrar.ErrNameReserved { 361 logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) 362 return nil 363 } 364 return err 365 } 366 daemon.linkIndex.link(parent, child, fullName) 367 return nil 368 } 369 370 // SetClusterProvider sets a component for quering the current cluster state. 371 func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) { 372 daemon.clusterProvider = clusterProvider 373 daemon.netController.SetClusterProvider(clusterProvider) 374 } 375 376 // IsSwarmCompatible verifies if the current daemon 377 // configuration is compatible with the swarm mode 378 func (daemon *Daemon) IsSwarmCompatible() error { 379 if daemon.configStore == nil { 380 return nil 381 } 382 return daemon.configStore.isSwarmCompatible() 383 } 384 385 // NewDaemon sets up everything for the daemon to be able to service 386 // requests from the webserver. 387 func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { 388 setDefaultMtu(config) 389 390 // Ensure that we have a correct root key limit for launching containers. 391 if err := ModifyRootKeyLimit(); err != nil { 392 logrus.Warnf("unable to modify root key limit, number of containers could be limitied by this quota: %v", err) 393 } 394 395 // Ensure we have compatible and valid configuration options 396 if err := verifyDaemonSettings(config); err != nil { 397 return nil, err 398 } 399 400 // Do we have a disabled network? 
401 config.DisableBridge = isBridgeNetworkDisabled(config) 402 403 // Verify the platform is supported as a daemon 404 if !platformSupported { 405 return nil, errSystemNotSupported 406 } 407 408 // Validate platform-specific requirements 409 if err := checkSystem(); err != nil { 410 return nil, err 411 } 412 413 // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event 414 // on Windows to dump Go routine stacks 415 setupDumpStackTrap() 416 417 uidMaps, gidMaps, err := setupRemappedRoot(config) 418 if err != nil { 419 return nil, err 420 } 421 rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) 422 if err != nil { 423 return nil, err 424 } 425 426 // get the canonical path to the Docker root directory 427 var realRoot string 428 if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { 429 realRoot = config.Root 430 } else { 431 realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) 432 if err != nil { 433 return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) 434 } 435 } 436 437 if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { 438 return nil, err 439 } 440 441 // set up the tmpDir to use a canonical path 442 tmp, err := tempDir(config.Root, rootUID, rootGID) 443 if err != nil { 444 return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) 445 } 446 realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) 447 if err != nil { 448 return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) 449 } 450 os.Setenv("TMPDIR", realTmp) 451 452 d := &Daemon{configStore: config} 453 // Ensure the daemon is properly shutdown if there is a failure during 454 // initialization 455 defer func() { 456 if err != nil { 457 if err := d.Shutdown(); err != nil { 458 logrus.Error(err) 459 } 460 } 461 }() 462 463 // Set the default isolation mode (only applicable on Windows) 464 if err := d.setDefaultIsolation(); err != nil { 465 
return nil, fmt.Errorf("error setting default isolation mode: %v", err) 466 } 467 468 logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) 469 470 if err := configureMaxThreads(config); err != nil { 471 logrus.Warnf("Failed to configure golang's threads limit: %v", err) 472 } 473 474 installDefaultAppArmorProfile() 475 daemonRepo := filepath.Join(config.Root, "containers") 476 if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { 477 return nil, err 478 } 479 480 driverName := os.Getenv("DOCKER_DRIVER") 481 if driverName == "" { 482 driverName = config.GraphDriver 483 } 484 d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ 485 StorePath: config.Root, 486 MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), 487 GraphDriver: driverName, 488 GraphDriverOptions: config.GraphOptions, 489 UIDMaps: uidMaps, 490 GIDMaps: gidMaps, 491 }) 492 if err != nil { 493 return nil, err 494 } 495 496 graphDriver := d.layerStore.DriverName() 497 imageRoot := filepath.Join(config.Root, "image", graphDriver) 498 499 // Configure and validate the kernels security support 500 if err := configureKernelSecuritySupport(config, graphDriver); err != nil { 501 return nil, err 502 } 503 504 logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) 505 d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) 506 logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) 507 d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) 508 509 ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) 510 if err != nil { 511 return nil, err 512 } 513 514 d.imageStore, err = image.NewImageStore(ifs, d.layerStore) 515 if err != nil { 516 return nil, err 517 } 518 519 // Configure the volumes driver 520 volStore, err := d.configureVolumes(rootUID, rootGID) 521 if err != nil { 522 return nil, err 
523 } 524 525 trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) 526 if err != nil { 527 return nil, err 528 } 529 530 trustDir := filepath.Join(config.Root, "trust") 531 532 if err := system.MkdirAll(trustDir, 0700); err != nil { 533 return nil, err 534 } 535 536 distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) 537 if err != nil { 538 return nil, err 539 } 540 541 eventsService := events.New() 542 543 referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) 544 if err != nil { 545 return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) 546 } 547 548 if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil { 549 return nil, fmt.Errorf("Couldn't restore custom images: %s", err) 550 } 551 552 migrationStart := time.Now() 553 if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { 554 logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) 555 } 556 logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) 557 558 // Discovery is only enabled when the daemon is launched with an address to advertise. When 559 // initialized, the daemon is registered and we can store the discovery backend as its read-only 560 if err := d.initDiscovery(config); err != nil { 561 return nil, err 562 } 563 564 sysInfo := sysinfo.New(false) 565 // Check if Devices cgroup is mounted, it is hard requirement for container security, 566 // on Linux. 
567 if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { 568 return nil, fmt.Errorf("Devices cgroup isn't mounted") 569 } 570 571 d.ID = trustKey.PublicKey().KeyID() 572 d.repository = daemonRepo 573 d.containers = container.NewMemoryStore() 574 d.execCommands = exec.NewStore() 575 d.referenceStore = referenceStore 576 d.distributionMetadataStore = distributionMetadataStore 577 d.trustKey = trustKey 578 d.idIndex = truncindex.NewTruncIndex([]string{}) 579 d.statsCollector = d.newStatsCollector(1 * time.Second) 580 d.defaultLogConfig = containertypes.LogConfig{ 581 Type: config.LogConfig.Type, 582 Config: config.LogConfig.Config, 583 } 584 d.RegistryService = registryService 585 d.EventsService = eventsService 586 d.volumes = volStore 587 d.root = config.Root 588 d.uidMaps = uidMaps 589 d.gidMaps = gidMaps 590 d.seccompEnabled = sysInfo.Seccomp 591 592 d.nameIndex = registrar.NewRegistrar() 593 d.linkIndex = newLinkIndex() 594 d.containerdRemote = containerdRemote 595 596 go d.execCommandGC() 597 598 d.containerd, err = containerdRemote.Client(d) 599 if err != nil { 600 return nil, err 601 } 602 603 if err := d.restore(); err != nil { 604 return nil, err 605 } 606 607 return d, nil 608 } 609 610 func (daemon *Daemon) shutdownContainer(c *container.Container) error { 611 // TODO(windows): Handle docker restart with paused containers 612 if c.IsPaused() { 613 // To terminate a process in freezer cgroup, we should send 614 // SIGTERM to this process then unfreeze it, and the process will 615 // force to terminate immediately. 
616 logrus.Debugf("Found container %s is paused, sending SIGTERM before unpause it", c.ID) 617 sig, ok := signal.SignalMap["TERM"] 618 if !ok { 619 return fmt.Errorf("System doesn not support SIGTERM") 620 } 621 if err := daemon.kill(c, int(sig)); err != nil { 622 return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) 623 } 624 if err := daemon.containerUnpause(c); err != nil { 625 return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) 626 } 627 if _, err := c.WaitStop(10 * time.Second); err != nil { 628 logrus.Debugf("container %s failed to exit in 10 second of SIGTERM, sending SIGKILL to force", c.ID) 629 sig, ok := signal.SignalMap["KILL"] 630 if !ok { 631 return fmt.Errorf("System does not support SIGKILL") 632 } 633 if err := daemon.kill(c, int(sig)); err != nil { 634 logrus.Errorf("Failed to SIGKILL container %s", c.ID) 635 } 636 c.WaitStop(-1 * time.Second) 637 return err 638 } 639 } 640 // If container failed to exit in 10 seconds of SIGTERM, then using the force 641 if err := daemon.containerStop(c, 10); err != nil { 642 return fmt.Errorf("Stop container %s with error: %v", c.ID, err) 643 } 644 645 c.WaitStop(-1 * time.Second) 646 return nil 647 } 648 649 // Shutdown stops the daemon. 650 func (daemon *Daemon) Shutdown() error { 651 daemon.shutdown = true 652 // Keep mounts and networking running on daemon shutdown if 653 // we are to keep containers running and restore them. 
654 if daemon.configStore.LiveRestore { 655 return nil 656 } 657 if daemon.containers != nil { 658 logrus.Debug("starting clean shutdown of all containers...") 659 daemon.containers.ApplyAll(func(c *container.Container) { 660 if !c.IsRunning() { 661 return 662 } 663 logrus.Debugf("stopping %s", c.ID) 664 if err := daemon.shutdownContainer(c); err != nil { 665 logrus.Errorf("Stop container error: %v", err) 666 return 667 } 668 if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { 669 daemon.cleanupMountsByID(mountid) 670 } 671 logrus.Debugf("container stopped %s", c.ID) 672 }) 673 } 674 675 // trigger libnetwork Stop only if it's initialized 676 if daemon.netController != nil { 677 daemon.netController.Stop() 678 } 679 680 if daemon.layerStore != nil { 681 if err := daemon.layerStore.Cleanup(); err != nil { 682 logrus.Errorf("Error during layer Store.Cleanup(): %v", err) 683 } 684 } 685 686 if err := daemon.cleanupMounts(); err != nil { 687 return err 688 } 689 690 return nil 691 } 692 693 // Mount sets container.BaseFS 694 // (is it not set coming in? why is it unset?) 695 func (daemon *Daemon) Mount(container *container.Container) error { 696 dir, err := container.RWLayer.Mount(container.GetMountLabel()) 697 if err != nil { 698 return err 699 } 700 logrus.Debugf("container mounted via layerStore: %v", dir) 701 702 if container.BaseFS != dir { 703 // The mount path reported by the graph driver should always be trusted on Windows, since the 704 // volume path for a given mounted layer may change over time. This should only be an error 705 // on non-Windows operating systems. 
706 if container.BaseFS != "" && runtime.GOOS != "windows" { 707 daemon.Unmount(container) 708 return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", 709 daemon.GraphDriverName(), container.ID, container.BaseFS, dir) 710 } 711 } 712 container.BaseFS = dir // TODO: combine these fields 713 return nil 714 } 715 716 // Unmount unsets the container base filesystem 717 func (daemon *Daemon) Unmount(container *container.Container) error { 718 if err := container.RWLayer.Unmount(); err != nil { 719 logrus.Errorf("Error unmounting container %s: %s", container.ID, err) 720 return err 721 } 722 return nil 723 } 724 725 func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { 726 progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) 727 operationCancelled := false 728 729 for prog := range progressChan { 730 if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { 731 // don't log broken pipe errors as this is the normal case when a client aborts 732 if isBrokenPipe(err) { 733 logrus.Info("Pull session cancelled") 734 } else { 735 logrus.Errorf("error writing progress to client: %v", err) 736 } 737 cancelFunc() 738 operationCancelled = true 739 // Don't return, because we need to continue draining 740 // progressChan until it's closed to avoid a deadlock. 
741 } 742 } 743 } 744 745 func isBrokenPipe(e error) bool { 746 if netErr, ok := e.(*net.OpError); ok { 747 e = netErr.Err 748 if sysErr, ok := netErr.Err.(*os.SyscallError); ok { 749 e = sysErr.Err 750 } 751 } 752 return e == syscall.EPIPE 753 } 754 755 // GraphDriverName returns the name of the graph driver used by the layer.Store 756 func (daemon *Daemon) GraphDriverName() string { 757 return daemon.layerStore.DriverName() 758 } 759 760 // GetUIDGIDMaps returns the current daemon's user namespace settings 761 // for the full uid and gid maps which will be applied to containers 762 // started in this instance. 763 func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { 764 return daemon.uidMaps, daemon.gidMaps 765 } 766 767 // GetRemappedUIDGID returns the current daemon's uid and gid values 768 // if user namespaces are in use for this daemon instance. If not 769 // this function will return "real" root values of 0, 0. 770 func (daemon *Daemon) GetRemappedUIDGID() (int, int) { 771 uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) 772 return uid, gid 773 } 774 775 // tempDir returns the default directory to use for temporary files. 776 func tempDir(rootDir string, rootUID, rootGID int) (string, error) { 777 var tmpDir string 778 if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { 779 tmpDir = filepath.Join(rootDir, "tmp") 780 } 781 return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) 782 } 783 784 func (daemon *Daemon) setupInitLayer(initPath string) error { 785 rootUID, rootGID := daemon.GetRemappedUIDGID() 786 return setupInitLayer(initPath, rootUID, rootGID) 787 } 788 789 func setDefaultMtu(config *Config) { 790 // do nothing if the config does not have the default 0 value. 
791 if config.Mtu != 0 { 792 return 793 } 794 config.Mtu = defaultNetworkMtu 795 } 796 797 func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { 798 volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) 799 if err != nil { 800 return nil, err 801 } 802 803 if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { 804 return nil, fmt.Errorf("local volume driver could not be registered") 805 } 806 return store.New(daemon.configStore.Root) 807 } 808 809 // IsShuttingDown tells whether the daemon is shutting down or not 810 func (daemon *Daemon) IsShuttingDown() bool { 811 return daemon.shutdown 812 } 813 814 // initDiscovery initializes the discovery watcher for this daemon. 815 func (daemon *Daemon) initDiscovery(config *Config) error { 816 advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) 817 if err != nil { 818 if err == errDiscoveryDisabled { 819 return nil 820 } 821 return err 822 } 823 824 config.ClusterAdvertise = advertise 825 discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) 826 if err != nil { 827 return fmt.Errorf("discovery initialization failed (%v)", err) 828 } 829 830 daemon.discoveryWatcher = discoveryWatcher 831 return nil 832 } 833 834 // Reload reads configuration changes and modifies the 835 // daemon according to those changes. 836 // These are the settings that Reload changes: 837 // - Daemon labels. 838 // - Daemon debug log level. 839 // - Daemon max concurrent downloads 840 // - Daemon max concurrent uploads 841 // - Cluster discovery (reconfigure and restart). 
842 // - Daemon live restore 843 func (daemon *Daemon) Reload(config *Config) error { 844 var err error 845 // used to hold reloaded changes 846 attributes := map[string]string{} 847 848 // We need defer here to ensure the lock is released as 849 // daemon.SystemInfo() will try to get it too 850 defer func() { 851 if err == nil { 852 daemon.LogDaemonEventWithAttributes("reload", attributes) 853 } 854 }() 855 856 daemon.configStore.reloadLock.Lock() 857 defer daemon.configStore.reloadLock.Unlock() 858 859 daemon.platformReload(config, &attributes) 860 861 if err = daemon.reloadClusterDiscovery(config); err != nil { 862 return err 863 } 864 865 if config.IsValueSet("labels") { 866 daemon.configStore.Labels = config.Labels 867 } 868 if config.IsValueSet("debug") { 869 daemon.configStore.Debug = config.Debug 870 } 871 if config.IsValueSet("live-restore") { 872 daemon.configStore.LiveRestore = config.LiveRestore 873 if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestore)); err != nil { 874 return err 875 } 876 877 } 878 879 // If no value is set for max-concurrent-downloads we assume it is the default value 880 // We always "reset" as the cost is lightweight and easy to maintain. 881 if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { 882 *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads 883 } else { 884 maxConcurrentDownloads := defaultMaxConcurrentDownloads 885 daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads 886 } 887 logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) 888 if daemon.downloadManager != nil { 889 daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) 890 } 891 892 // If no value is set for max-concurrent-upload we assume it is the default value 893 // We always "reset" as the cost is lightweight and easy to maintain. 
894 if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { 895 *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads 896 } else { 897 maxConcurrentUploads := defaultMaxConcurrentUploads 898 daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads 899 } 900 logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) 901 if daemon.uploadManager != nil { 902 daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) 903 } 904 905 // We emit daemon reload event here with updatable configurations 906 attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) 907 attributes["cluster-store"] = daemon.configStore.ClusterStore 908 if daemon.configStore.ClusterOpts != nil { 909 opts, _ := json.Marshal(daemon.configStore.ClusterOpts) 910 attributes["cluster-store-opts"] = string(opts) 911 } else { 912 attributes["cluster-store-opts"] = "{}" 913 } 914 attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise 915 if daemon.configStore.Labels != nil { 916 labels, _ := json.Marshal(daemon.configStore.Labels) 917 attributes["labels"] = string(labels) 918 } else { 919 attributes["labels"] = "[]" 920 } 921 attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) 922 attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) 923 924 return nil 925 } 926 927 func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { 928 var err error 929 newAdvertise := daemon.configStore.ClusterAdvertise 930 newClusterStore := daemon.configStore.ClusterStore 931 if config.IsValueSet("cluster-advertise") { 932 if config.IsValueSet("cluster-store") { 933 newClusterStore = config.ClusterStore 934 } 935 newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) 936 if err != nil && err != errDiscoveryDisabled { 937 return err 938 } 939 } 940 941 if 
daemon.clusterProvider != nil { 942 if err := config.isSwarmCompatible(); err != nil { 943 return err 944 } 945 } 946 947 // check discovery modifications 948 if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { 949 return nil 950 } 951 952 // enable discovery for the first time if it was not previously enabled 953 if daemon.discoveryWatcher == nil { 954 discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) 955 if err != nil { 956 return fmt.Errorf("discovery initialization failed (%v)", err) 957 } 958 daemon.discoveryWatcher = discoveryWatcher 959 } else { 960 if err == errDiscoveryDisabled { 961 // disable discovery if it was previously enabled and it's disabled now 962 daemon.discoveryWatcher.Stop() 963 } else { 964 // reload discovery 965 if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { 966 return err 967 } 968 } 969 } 970 971 daemon.configStore.ClusterStore = newClusterStore 972 daemon.configStore.ClusterOpts = config.ClusterOpts 973 daemon.configStore.ClusterAdvertise = newAdvertise 974 975 if daemon.netController == nil { 976 return nil 977 } 978 netOptions, err := daemon.networkOptions(daemon.configStore, nil) 979 if err != nil { 980 logrus.Warnf("Failed to reload configuration with network controller: %v", err) 981 return nil 982 } 983 err = daemon.netController.ReloadConfiguration(netOptions...) 
984 if err != nil { 985 logrus.Warnf("Failed to reload configuration with network controller: %v", err) 986 } 987 988 return nil 989 } 990 991 func isBridgeNetworkDisabled(config *Config) bool { 992 return config.bridgeConfig.Iface == disableNetworkBridge 993 } 994 995 func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { 996 options := []nwconfig.Option{} 997 if dconfig == nil { 998 return options, nil 999 } 1000 1001 options = append(options, nwconfig.OptionDataDir(dconfig.Root)) 1002 1003 dd := runconfig.DefaultDaemonNetworkMode() 1004 dn := runconfig.DefaultDaemonNetworkMode().NetworkName() 1005 options = append(options, nwconfig.OptionDefaultDriver(string(dd))) 1006 options = append(options, nwconfig.OptionDefaultNetwork(dn)) 1007 1008 if strings.TrimSpace(dconfig.ClusterStore) != "" { 1009 kv := strings.Split(dconfig.ClusterStore, "://") 1010 if len(kv) != 2 { 1011 return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") 1012 } 1013 options = append(options, nwconfig.OptionKVProvider(kv[0])) 1014 options = append(options, nwconfig.OptionKVProviderURL(kv[1])) 1015 } 1016 if len(dconfig.ClusterOpts) > 0 { 1017 options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) 1018 } 1019 1020 if daemon.discoveryWatcher != nil { 1021 options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) 1022 } 1023 1024 if dconfig.ClusterAdvertise != "" { 1025 options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) 1026 } 1027 1028 options = append(options, nwconfig.OptionLabels(dconfig.Labels)) 1029 options = append(options, driverOptions(dconfig)...) 
1030 1031 if daemon.configStore != nil && daemon.configStore.LiveRestore && len(activeSandboxes) != 0 { 1032 options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) 1033 } 1034 1035 return options, nil 1036 } 1037 1038 func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { 1039 out := make([]types.BlkioStatEntry, len(entries)) 1040 for i, re := range entries { 1041 out[i] = types.BlkioStatEntry{ 1042 Major: re.Major, 1043 Minor: re.Minor, 1044 Op: re.Op, 1045 Value: re.Value, 1046 } 1047 } 1048 return out 1049 }