// Package daemon exposes the functions that occur on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/daemon/discovery"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/network"
	"github.com/docker/docker/errdefs"
	"github.com/sirupsen/logrus"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	"github.com/docker/docker/daemon/initlayer"
	"github.com/docker/docker/daemon/stats"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/plugin"
	pluginexec "github.com/docker/docker/plugin/executor/containerd"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	"github.com/docker/libnetwork/cluster"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
	"github.com/pkg/errors"
)

// ContainersNamespace is the name of the namespace used for user containers
const ContainersNamespace = "moby"

var (
	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                        string
	repository                string
	containers                container.Store
	containersReplica         container.ViewDB
	execCommands              *exec.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *config.Config
	statsCollector            *stats.Collector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	discoveryWatcher          discovery.Reloader
	root                      string
	seccompEnabled            bool
	apparmorEnabled           bool
	shutdown                  bool
	idMappings                *idtools.IDMappings
	graphDrivers              map[string]string // By operating system
	referenceStore            refstore.Store
	imageStore                image.Store
	imageRoot                 string
	layerStores               map[string]layer.Store // By operating system
	distributionMetadataStore dmetadata.Store
	PluginStore               *plugin.Store // todo: remove
	pluginManager             *plugin.Manager
	linkIndex                 *linkIndex
	containerd                libcontainerd.Client
	containerdRemote          libcontainerd.Remote
	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
	clusterProvider           cluster.Provider
	cluster                   Cluster
	genericResources          []swarm.GenericResource
	metricsPluginListener     net.Listener

	machineMemory uint64

	seccompProfile     []byte
	seccompProfilePath string

	diskUsageRunning int32
	pruneRunning     int32
	hosts            map[string]bool // hosts stores the addresses the daemon is listening on
	startupDone      chan struct{}

	attachmentStore network.AttachmentStore
}

// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
	if daemon.hosts == nil {
		daemon.hosts = make(map[string]bool)
	}
	for _, h := range hosts {
		daemon.hosts[h] = true
	}
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	return daemon.configStore != nil && daemon.configStore.Experimental
}
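// restore (below) loads the containers saved under the daemon's repository
// directory and reconciles them with containerd. In outline, it:
//
//  1. reads each container directory, keeping only containers whose graph
//     driver matches the one currently in use;
//  2. registers names and log configuration, then asks containerd which
//     containers are still alive, killing live ones when live-restore is
//     disabled;
//  3. collects the containers to auto-restart (restart policy) and to
//     auto-remove (AutoRemove), plus the network sandboxes of live
//     containers, so the network controller can be initialized around them;
//  4. registers legacy links, starts the restart set in parallel (waiting
//     briefly for linked children and for networks), removes the
//     auto-remove set, and prepares mount points for the remainder.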
func (daemon *Daemon) restore() error {
	containers := make(map[string]*container.Container)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}
		if !system.IsOSSupported(container.OS) {
			logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS)
			continue
		}
		// Ignore the container if it does not support the current driver being used by the graph
		currentDriverForContainerOS := daemon.graphDrivers[container.OS]
		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
			rwlayer, err := daemon.layerStores[container.OS].GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
		if err := daemon.verifyVolumesInfo(c); err != nil {
			// don't skip the container due to error
			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}

	var (
		wg      sync.WaitGroup
		mapLock sync.Mutex
	)
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			daemon.backportMountSpec(c)
			if err := daemon.checkpointAndSave(c); err != nil {
				logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
			}

			daemon.setStateCounter(c)

			logrus.WithFields(logrus.Fields{
				"container": c.ID,
				"running":   c.IsRunning(),
				"paused":    c.IsPaused(),
			}).Debug("restoring container")

			var (
				err      error
				alive    bool
				ec       uint32
				exitedAt time.Time
			)

			alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
			if err != nil && !errdefs.IsNotFound(err) {
				logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
				return
			}
			if !alive {
				ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID)
				if err != nil && !errdefs.IsNotFound(err) {
					logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
					return
				}
			} else if !daemon.configStore.LiveRestoreEnabled {
				if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
					logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
					return
				}
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking

				if c.IsPaused() && alive {
					s, err := daemon.containerd.Status(context.Background(), c.ID)
					if err != nil {
						logrus.WithError(err).WithField("container", c.ID).
							Errorf("Failed to get container status")
					} else {
						logrus.WithField("container", c.ID).WithField("state", s).
							Info("restored container paused")
						switch s {
						case libcontainerd.StatusPaused, libcontainerd.StatusPausing:
							// nothing to do
						case libcontainerd.StatusStopped:
							alive = false
						case libcontainerd.StatusUnknown:
							logrus.WithField("container", c.ID).
								Error("Unknown status for container during restore")
						default:
							// running
							c.Lock()
							c.Paused = false
							daemon.setStateCounter(c)
							if err := c.CheckpointTo(daemon.containersReplica); err != nil {
								logrus.WithError(err).WithField("container", c.ID).
									Error("Failed to update stopped container state")
							}
							c.Unlock()
						}
					}
				}

				if !alive {
					c.Lock()
					c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
					daemon.Cleanup(c)
					if err := c.CheckpointTo(daemon.containersReplica); err != nil {
						logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
					}
					c.Unlock()
				}

				// we call Mount and then Unmount to get BaseFs of the container
				if err := daemon.Mount(c); err != nil {
					// The mount is unlikely to fail. However, in case mount fails
					// the container should be allowed to restore here. Some functionality
					// (like docker exec -u user) might be missing, but the container can
					// still be stopped/restarted/removed.
					// See #29365 for related information.
					// The error is only logged here.
					logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
				} else {
					if err := daemon.Unmount(c); err != nil {
						logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
					}
				}

				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}
			}

			// Get the list of containers we need to restart.
			//
			// Do not autostart containers which have endpoints in a
			// swarm-scope network yet, since the cluster is not initialized
			// yet. We will start them after the cluster is initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
				mapLock.Lock()
				restartContainers[c] = make(chan struct{})
				mapLock.Unlock()
			} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
				mapLock.Lock()
				removeContainers[c.ID] = c
				mapLock.Unlock()
			}

			c.Lock()
			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.RemovalInProgress = false
				c.Dead = true
				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
					logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
				}
			}
			c.Unlock()
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
			removeGroup.Done()
		}(id)
	}
	removeGroup.Wait()

	// Any containers that were started above have already had this done;
	// now prepare the mount points for the rest of the containers as well.
	// Running it again on a container that already had it is harmless.
	// This must be run after any containers with a restart policy so that
	// containerized plugins can have a chance to be running before we try
	// to initialize them.
	for _, c := range containers {
		// If the container has a restart policy, do not prepare the mount
		// points, since that was already done when it restarted. This speeds
		// up daemon start when a restarting container has a volume whose
		// volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}
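// Note: the restart loop in restore waits for a container's linked children
// using a closed-channel notifier and a single shared timeout channel, so the
// total wait per container is bounded at five seconds rather than five
// seconds per child. A minimal, self-contained sketch of that pattern
// (hypothetical "dependency" placeholder, not daemon code):
//
//	ready := make(chan struct{}) // closed by whoever starts the dependency
//	go func() {
//		// ... start the dependency ...
//		close(ready) // a closed channel unblocks every waiter
//	}()
//	timeout := time.After(5 * time.Second) // created once, shared by all waits
//	select {
//	case <-ready: // dependency came up in time
//	case <-timeout: // bounded wait; proceed on a best-effort basis
//	}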
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a swarm endpoint,
			// now that the cluster is initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}
	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure that, if the container has a network that requires discovery,
	// the discovery service is available before starting.
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready.
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started.
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
		if err == container.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}
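// For example, registering a link from parent container "/web" to a child
// with alias "mysql" reserves the name path.Join("/web", "mysql"), i.e.
// "/web/mysql", for the child and records the edge in the daemon's link
// index; a name collision is logged and ignored rather than treated as a
// hard failure.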
// DaemonJoinsCluster informs that the daemon has joined the cluster and
// provides the handler to query the cluster component.
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
	daemon.setClusterProvider(clusterProvider)
}

// DaemonLeavesCluster informs that the daemon has left the cluster.
func (daemon *Daemon) DaemonLeavesCluster() {
	// Daemon is in charge of removing the attachable networks with
	// connected containers when the node leaves the swarm
	daemon.clearAttachableNetworks()
	// We no longer need the cluster provider, stop it now so that
	// the network agent will stop listening to cluster events.
	daemon.setClusterProvider(nil)
	// Wait for the networking cluster agent to stop
	daemon.netController.AgentStopWait()
	// Daemon is in charge of removing the ingress network when the
	// node leaves the swarm. Wait for the job to be done or timeout.
	// This is also called on graceful daemon shutdown. We need to
	// wait, because the ingress release has to happen before the
	// network controller is stopped.
	if done, err := daemon.ReleaseIngress(); err == nil {
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			logrus.Warnf("timeout while waiting for ingress network removal")
		}
	} else {
		logrus.Warnf("failed to initiate ingress network removal: %v", err)
	}

	daemon.attachmentStore.ClearAttachments()
}

// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.IsSwarmCompatible()
}
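// The swarm cluster component is expected to invoke DaemonJoinsCluster and
// DaemonLeavesCluster (above) around membership changes; a rough sketch of
// the ordering (hypothetical caller, not the actual cluster code):
//
//	d.DaemonJoinsCluster(provider) // on join: start answering cluster queries
//	// ... the node participates in the swarm ...
//	d.DaemonLeavesCluster() // on leave: detach networks, stop the agent,
//	// release the ingress network, and clear attachments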
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	idMappings, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootIDs := idMappings.RootPair()
	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := prepareTempDir(config.Root, rootIDs)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := getRealPath(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	if runtime.GOOS == "windows" {
		if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
			if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
				return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
			}
		}
		os.Setenv("TEMP", realTmp)
		os.Setenv("TMP", realTmp)
	} else {
		os.Setenv("TMPDIR", realTmp)
	}

	d := &Daemon{
		configStore: config,
		PluginStore: pluginStore,
		startupDone: make(chan struct{}),
	}
	// Ensure the daemon is properly shut down if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setGenericResources(config); err != nil {
		return nil, err
	}
	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	if err := ensureDefaultAppArmorProfile(); err != nil {
		logrus.Errorf(err.Error())
	}

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
		return nil, err
	}

	// Create the directory where we'll store the runtime scripts (i.e. in
	// order to support runtimeArgs)
	daemonRuntimes := filepath.Join(config.Root, "runtimes")
	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
		return nil, err
	}
	if err := d.loadRuntimes(); err != nil {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
			return nil, err
		}
	}
	// On Windows we don't support the environment variable, or a user-supplied
	// graphdriver, as Windows has no choice in terms of which graphdrivers to
	// use. It's a case of running Windows containers on Windows (windowsfilter)
	// or running Linux containers on Windows (lcow). Unix platforms, however,
	// run a single graphdriver for all containers, and it can be set through an
	// environment variable, a daemon start parameter, or chosen through
	// initialization of the layerstore through driver priority order, for example.
	d.graphDrivers = make(map[string]string)
	d.layerStores = make(map[string]layer.Store)
	if runtime.GOOS == "windows" {
		d.graphDrivers[runtime.GOOS] = "windowsfilter"
		if system.LCOWSupported() {
			d.graphDrivers["linux"] = "lcow"
		}
	} else {
		driverName := os.Getenv("DOCKER_DRIVER")
		if driverName == "" {
			driverName = config.GraphDriver
		} else {
			logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
		}
		d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
	}

	d.RegistryService = registryService
	logger.RegisterPluginGetter(d.PluginStore)

	metricsSockPath, err := d.listenMetricsSock()
	if err != nil {
		return nil, err
	}
	registerMetricsPluginCallback(d.PluginStore, metricsSockPath)

	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
		return pluginexec.New(getPluginExecRoot(config.Root), containerdRemote, m)
	}

	// Plugin system initialization should happen before restore. Do not change order.
	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
		Root:               filepath.Join(config.Root, "plugins"),
		ExecRoot:           getPluginExecRoot(config.Root),
		Store:              d.PluginStore,
		CreateExecutor:     createPluginExec,
		RegistryService:    registryService,
		LiveRestoreEnabled: config.LiveRestoreEnabled,
		LogPluginEvent:     d.LogPluginEvent, // todo: make private
		AuthzMiddleware:    config.AuthzMiddleware,
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create plugin manager")
	}

	for operatingSystem, gd := range d.graphDrivers {
		d.layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
			Root:                      config.Root,
			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
			GraphDriver:               gd,
			GraphDriverOptions:        config.GraphOptions,
			IDMappings:                idMappings,
			PluginGetter:              d.PluginStore,
			ExperimentalEnabled:       config.Experimental,
			OS:                        operatingSystem,
		})
		if err != nil {
			return nil, err
		}
	}

	// As layerstore initialization may set the driver
	for os := range d.graphDrivers {
		d.graphDrivers[os] = d.layerStores[os].DriverName()
	}
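	// For example, on Linux either of
	//
	//	DOCKER_DRIVER=overlay2 dockerd
	//	dockerd --storage-driver=overlay2
	//
	// pins the storage driver, with the environment variable taking
	// precedence; when neither is set, layerstore initialization picks a
	// driver by its built-in priority order, and the loop above copies the
	// resolved name back into d.graphDrivers.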
	// Configure and validate the kernel's security support. Note this is a Linux/FreeBSD
	// operation only, so it is safe to pass *just* the runtime OS graphdriver.
	if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStores, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	d.imageRoot = filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
	ifs, err := image.NewFSStoreBackend(filepath.Join(d.imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	lgrMap := make(map[string]image.LayerGetReleaser)
	for os, ls := range d.layerStores {
		lgrMap[os] = ls
	}
	d.imageStore, err = image.NewImageStore(ifs, lgrMap)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootIDs)
	if err != nil {
		return nil, err
	}

	trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
		return nil, err
	}

	eventsService := events.New()

	// We have a single tag/reference store for the daemon globally. However, it's
	// stored under the graphdriver. On host platforms which only support a single
	// container OS, but multiple selectable graphdrivers, this means that depending
	// on which graphdriver is chosen, the global reference store is under there.
	// For platforms which support multiple container operating systems, this is
	// slightly more problematic, as where does the global ref store get located?
	// Fortunately, for Windows, which is currently the only daemon supporting
	// multiple container operating systems, the list of graphdrivers available
	// isn't user configurable. For backwards compatibility, we just put it under
	// the windowsfilter directory regardless.
	refStoreLocation := filepath.Join(d.imageRoot, `repositories.json`)
	rs, err := refstore.NewReferenceStore(refStoreLocation)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
	}
	d.referenceStore = rs

	d.distributionMetadataStore, err = dmetadata.NewFSMetadataStore(filepath.Join(d.imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	// No content-addressability migration on Windows as it never supported pre-CA
	if runtime.GOOS != "windows" {
		migrationStart := time.Now()
		if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], d.layerStores[runtime.GOOS], d.imageStore, rs, d.distributionMetadataStore); err != nil {
			logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
		}
		logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
	}
	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only.
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if the Devices cgroup is mounted; it is a hard requirement for
	// container security on Linux.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, errors.New("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	if d.containersReplica, err = container.NewViewDB(); err != nil {
		return nil, err
	}
	d.execCommands = exec.NewStore()
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.idMappings = idMappings
	d.seccompEnabled = sysInfo.Seccomp
	d.apparmorEnabled = sysInfo.AppArmor
	d.containerdRemote = containerdRemote

	d.linkIndex = newLinkIndex()

	go d.execCommandGC()

	d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}
	close(d.startupDone)

	// FIXME: this method never returns an error
	info, _ := d.SystemInfo()

	engineInfo.WithValues(
		dockerversion.Version,
		dockerversion.GitCommit,
		info.Architecture,
		info.Driver,
		info.KernelVersion,
		info.OperatingSystem,
		info.OSType,
		info.ID,
	).Set(1)
	engineCpus.Set(float64(info.NCPU))
	engineMemory.Set(float64(info.MemTotal))

	gd := ""
	for os, driver := range d.graphDrivers {
		if len(gd) > 0 {
			gd += ", "
		}
		gd += driver
		if len(d.graphDrivers) > 1 {
			gd = fmt.Sprintf("%s (%s)", gd, os)
		}
	}
	logrus.WithFields(logrus.Fields{
		"version":        dockerversion.Version,
		"commit":         dockerversion.GitCommit,
		"graphdriver(s)": gd,
	}).Info("Docker daemon")

	return d, nil
}
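// A minimal sketch of how a caller might construct a daemon (hypothetical
// setup; the real entry point in cmd/dockerd also wires up the API server,
// the cluster component, and signal handling):
//
//	cfg := config.New()
//	cfg.Root = "/var/lib/docker"
//	// registryService and containerdRemote would come from the registry
//	// and libcontainerd packages respectively; their setup is elided here.
//	d, err := NewDaemon(cfg, registryService, containerdRemote, plugin.NewStore())
//	if err != nil {
//		logrus.Fatal(err)
//	}
//	defer d.Shutdown()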
func (daemon *Daemon) waitForStartupDone() {
	<-daemon.startupDone
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()

	// If the container failed to exit within stopTimeout seconds of SIGTERM, then use the force
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	// Wait without timeout for the container to exit.
	// Ignore the result.
	<-c.Wait(context.Background(), container.WaitConditionNotRunning)
	return nil
}

// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
	// By default we use daemon's ShutdownTimeout.
	shutdownTimeout := daemon.configStore.ShutdownTimeout

	graceTimeout := 5
	if daemon.containers != nil {
		for _, c := range daemon.containers.List() {
			if shutdownTimeout >= 0 {
				stopTimeout := c.StopTimeout()
				if stopTimeout < 0 {
					shutdownTimeout = -1
				} else {
					if stopTimeout+graceTimeout > shutdownTimeout {
						shutdownTimeout = stopTimeout + graceTimeout
					}
				}
			}
		}
	}
	return shutdownTimeout
}
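// For example, with the default 15-second daemon shutdown timeout and a
// single running container whose StopTimeout() is 20 seconds, ShutdownTimeout
// returns 25 (the container's 20 seconds plus the 5-second grace period); any
// container with a negative stop timeout makes the result -1, meaning no
// upper bound is applied.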
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers; if none, we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			// metrics plugins still need some cleanup
			daemon.cleanupMetricsPlugins()
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStores[c.OS].GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.volumes != nil {
		if err := daemon.volumes.Shutdown(); err != nil {
			logrus.Errorf("Error shutting down volume store: %v", err)
		}
	}

	for os, ls := range daemon.layerStores {
		if ls != nil {
			if err := ls.Cleanup(); err != nil {
				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os)
			}
		}
	}

	// If we are part of a cluster, clean up cluster resources
	if daemon.clusterProvider != nil {
		logrus.Debugf("start clean shutdown of cluster resources...")
		daemon.DaemonLeavesCluster()
	}

	daemon.cleanupMetricsPlugins()

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	return daemon.cleanupMounts()
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
		if runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(container.OS), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}

	return nil
}

// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
	var v4Subnets []net.IPNet
	var v6Subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4infos, v6infos := managedNetwork.Info().IpamInfo()
		for _, info := range v4infos {
			if info.IPAMData.Pool != nil {
				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
			}
		}
		for _, info := range v6infos {
			if info.IPAMData.Pool != nil {
				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
			}
		}
	}

	return v4Subnets, v6Subnets
}

// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName(os string) string {
	return daemon.layerStores[os].DriverName()
}
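// The daemon keeps its scratch space under <root>/tmp by default, but it can
// be redirected by setting DOCKER_TMPDIR in the daemon's environment, e.g.
// DOCKER_TMPDIR=/mnt/scratch. As prepareTempDir below shows, a user-supplied
// directory is created but never wiped, since it may hold files that do not
// belong to the daemon.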
// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
		newName := tmpDir + "-old"
		if err := os.Rename(tmpDir, newName); err == nil {
			go func() {
				if err := os.RemoveAll(newName); err != nil {
					logrus.Warnf("failed to delete old tmp directory: %s", newName)
				}
			}()
		} else if !os.IsNotExist(err) {
			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
			if err := os.RemoveAll(tmpDir); err != nil {
				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
			}
		}
	}
	// We don't remove the content of tmpdir if it's not the default,
	// as it may hold things that do not belong to us.
	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
}

func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error {
	rootIDs := daemon.idMappings.RootPair()
	return initlayer.Setup(initPath, rootIDs)
}

func (daemon *Daemon) setGenericResources(conf *config.Config) error {
	genericResources, err := config.ParseGenericResources(conf.NodeGenericResources)
	if err != nil {
		return err
	}

	daemon.genericResources = genericResources

	return nil
}

func setDefaultMtu(conf *config.Config) {
	// do nothing if the config does not have the default 0 value.
	if conf.Mtu != 0 {
		return
	}
	conf.Mtu = config.DefaultNetworkMtu
}

func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootIDs)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, errors.New("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
	advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
	if err != nil {
		if err == discovery.ErrDiscoveryDisabled {
			return nil
		}
		return err
	}

	conf.ClusterAdvertise = advertise
	discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

func isBridgeNetworkDisabled(conf *config.Config) bool {
	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}
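// networkOptions below translates the daemon configuration into libnetwork
// options. For example, a cluster store value of "consul://localhost:8500"
// is split on "://" into the KV provider "consul" and the provider URL
// "localhost:8500"; a value without exactly one "://" separator is rejected.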
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))

	return options, nil
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}

func (daemon *Daemon) pluginShutdown() {
	manager := daemon.pluginManager
	// Check for a valid manager object. In error conditions, daemon init can
	// fail, and Shutdown may be called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
	return daemon.pluginManager
}

// PluginGetter returns the current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
	return daemon.PluginStore
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = getRealPath(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	idMappings, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	return setupDaemonRoot(config, realRoot, idMappings.RootPair())
}

// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
	container.Lock()
	defer container.Unlock()
	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		return fmt.Errorf("Error saving container state: %v", err)
	}
	return nil
}

// because the CLI sends a -1 when it wants to unset the swappiness value
// we need to clear it on the server side
func fixMemorySwappiness(resources *containertypes.Resources) {
	if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
		resources.MemorySwappiness = nil
	}
}

// GetAttachmentStore returns the current attachment store associated with the daemon
func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
	return &daemon.attachmentStore
}