// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/api/errdefs"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/daemon/discovery"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/network"
	"github.com/sirupsen/logrus"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	"github.com/docker/docker/daemon/initlayer"
	"github.com/docker/docker/daemon/stats"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/plugin"
	pluginexec "github.com/docker/docker/plugin/executor/containerd"
	refstore "github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	"github.com/docker/libnetwork/cluster"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
	"github.com/pkg/errors"
)

// ContainersNamespace is the name of the namespace used for users' containers
const ContainersNamespace = "moby"

var (
	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
)

type daemonStore struct {
	graphDriver               string
	imageRoot                 string
	imageStore                image.Store
	layerStore                layer.Store
	distributionMetadataStore dmetadata.Store
}

// Daemon holds information about the Docker daemon.
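// A single Daemon is created via NewDaemon below, services requests from the
// webserver, and is torn down again via Shutdown.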
type Daemon struct {
	ID                    string
	repository            string
	containers            container.Store
	containersReplica     container.ViewDB
	execCommands          *exec.Store
	downloadManager       *xfer.LayerDownloadManager
	uploadManager         *xfer.LayerUploadManager
	trustKey              libtrust.PrivateKey
	idIndex               *truncindex.TruncIndex
	configStore           *config.Config
	statsCollector        *stats.Collector
	defaultLogConfig      containertypes.LogConfig
	RegistryService       registry.Service
	EventsService         *events.Events
	netController         libnetwork.NetworkController
	volumes               *store.VolumeStore
	discoveryWatcher      discovery.Reloader
	root                  string
	seccompEnabled        bool
	apparmorEnabled       bool
	shutdown              bool
	idMappings            *idtools.IDMappings
	stores                map[string]daemonStore // By container target platform
	referenceStore        refstore.Store
	PluginStore           *plugin.Store // todo: remove
	pluginManager         *plugin.Manager
	linkIndex             *linkIndex
	containerd            libcontainerd.Client
	containerdRemote      libcontainerd.Remote
	defaultIsolation      containertypes.Isolation // Default isolation mode on Windows
	clusterProvider       cluster.Provider
	cluster               Cluster
	genericResources      []swarm.GenericResource
	metricsPluginListener net.Listener

	machineMemory uint64

	seccompProfile     []byte
	seccompProfilePath string

	diskUsageRunning int32
	pruneRunning     int32
	hosts            map[string]bool // hosts stores the addresses the daemon is listening on
	startupDone      chan struct{}

	attachmentStore network.AttachmentStore
}

// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
	if daemon.hosts == nil {
		daemon.hosts = make(map[string]bool)
	}
	for _, h := range hosts {
		daemon.hosts[h] = true
	}
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	return daemon.configStore != nil && daemon.configStore.Experimental
}

func (daemon *Daemon) restore() error {
	containers := make(map[string]*container.Container)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		currentDriverForContainerOS := daemon.stores[container.OS].graphDriver
		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
			rwlayer, err := daemon.stores[container.OS].layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container name %s: %s", c.ID, err) 186 delete(containers, id) 187 continue 188 } 189 // verify that all volumes valid and have been migrated from the pre-1.7 layout 190 if err := daemon.verifyVolumesInfo(c); err != nil { 191 // don't skip the container due to error 192 logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) 193 } 194 if err := daemon.Register(c); err != nil { 195 logrus.Errorf("Failed to register container %s: %s", c.ID, err) 196 delete(containers, id) 197 continue 198 } 199 200 // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. 201 // We should rewrite it to use the daemon defaults. 202 // Fixes https://github.com/docker/docker/issues/22536 203 if c.HostConfig.LogConfig.Type == "" { 204 if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { 205 logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) 206 continue 207 } 208 } 209 } 210 211 var ( 212 wg sync.WaitGroup 213 mapLock sync.Mutex 214 ) 215 for _, c := range containers { 216 wg.Add(1) 217 go func(c *container.Container) { 218 defer wg.Done() 219 daemon.backportMountSpec(c) 220 if err := daemon.checkpointAndSave(c); err != nil { 221 logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") 222 } 223 224 daemon.setStateCounter(c) 225 226 logrus.WithFields(logrus.Fields{ 227 "container": c.ID, 228 "running": c.IsRunning(), 229 "paused": c.IsPaused(), 230 }).Debug("restoring container") 231 232 var ( 233 err error 234 alive bool 235 ec uint32 236 exitedAt time.Time 237 ) 238 239 alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio) 240 if err != nil && !errdefs.IsNotFound(err) { 241 logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err) 242 return 243 } 244 if !alive { 245 ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID) 246 if err != nil && !errdefs.IsNotFound(err) { 247 logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID) 248 return 249 } 250 } else if !daemon.configStore.LiveRestoreEnabled { 251 if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) { 252 logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container") 253 return 254 } 255 } 256 257 if c.IsRunning() || c.IsPaused() { 258 c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking 259 260 if c.IsPaused() && alive { 261 s, err := daemon.containerd.Status(context.Background(), c.ID) 262 if err != nil { 263 logrus.WithError(err).WithField("container", c.ID). 264 Errorf("Failed to get container status") 265 } else { 266 logrus.WithField("container", c.ID).WithField("state", s). 267 Info("restored container paused") 268 switch s { 269 case libcontainerd.StatusPaused, libcontainerd.StatusPausing: 270 // nothing to do 271 case libcontainerd.StatusStopped: 272 alive = false 273 case libcontainerd.StatusUnknown: 274 logrus.WithField("container", c.ID). 275 Error("Unknown status for container during restore") 276 default: 277 // running 278 c.Lock() 279 c.Paused = false 280 daemon.setStateCounter(c) 281 if err := c.CheckpointTo(daemon.containersReplica); err != nil { 282 logrus.WithError(err).WithField("container", c.ID). 
283 Error("Failed to update stopped container state") 284 } 285 c.Unlock() 286 } 287 } 288 } 289 290 if !alive { 291 c.Lock() 292 c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt}) 293 daemon.Cleanup(c) 294 if err := c.CheckpointTo(daemon.containersReplica); err != nil { 295 logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err) 296 } 297 c.Unlock() 298 } 299 300 // we call Mount and then Unmount to get BaseFs of the container 301 if err := daemon.Mount(c); err != nil { 302 // The mount is unlikely to fail. However, in case mount fails 303 // the container should be allowed to restore here. Some functionalities 304 // (like docker exec -u user) might be missing but container is able to be 305 // stopped/restarted/removed. 306 // See #29365 for related information. 307 // The error is only logged here. 308 logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) 309 } else { 310 if err := daemon.Unmount(c); err != nil { 311 logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) 312 } 313 } 314 315 c.ResetRestartManager(false) 316 if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { 317 options, err := daemon.buildSandboxOptions(c) 318 if err != nil { 319 logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err) 320 } 321 mapLock.Lock() 322 activeSandboxes[c.NetworkSettings.SandboxID] = options 323 mapLock.Unlock() 324 } 325 } 326 327 // get list of containers we need to restart 328 329 // Do not autostart containers which 330 // has endpoints in a swarm scope 331 // network yet since the cluster is 332 // not initialized yet. We will start 333 // it after the cluster is 334 // initialized. 335 if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { 336 mapLock.Lock() 337 restartContainers[c] = make(chan struct{}) 338 mapLock.Unlock() 339 } else if c.HostConfig != nil && c.HostConfig.AutoRemove { 340 mapLock.Lock() 341 removeContainers[c.ID] = c 342 mapLock.Unlock() 343 } 344 345 c.Lock() 346 if c.RemovalInProgress { 347 // We probably crashed in the middle of a removal, reset 348 // the flag. 349 // 350 // We DO NOT remove the container here as we do not 351 // know if the user had requested for either the 352 // associated volumes, network links or both to also 353 // be removed. So we put the container in the "dead" 354 // state and leave further processing up to them. 
355 logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) 356 c.RemovalInProgress = false 357 c.Dead = true 358 if err := c.CheckpointTo(daemon.containersReplica); err != nil { 359 logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err) 360 } 361 } 362 c.Unlock() 363 }(c) 364 } 365 wg.Wait() 366 daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) 367 if err != nil { 368 return fmt.Errorf("Error initializing network controller: %v", err) 369 } 370 371 // Now that all the containers are registered, register the links 372 for _, c := range containers { 373 if err := daemon.registerLinks(c, c.HostConfig); err != nil { 374 logrus.Errorf("failed to register link for container %s: %v", c.ID, err) 375 } 376 } 377 378 group := sync.WaitGroup{} 379 for c, notifier := range restartContainers { 380 group.Add(1) 381 382 go func(c *container.Container, chNotify chan struct{}) { 383 defer group.Done() 384 385 logrus.Debugf("Starting container %s", c.ID) 386 387 // ignore errors here as this is a best effort to wait for children to be 388 // running before we try to start the container 389 children := daemon.children(c) 390 timeout := time.After(5 * time.Second) 391 for _, child := range children { 392 if notifier, exists := restartContainers[child]; exists { 393 select { 394 case <-notifier: 395 case <-timeout: 396 } 397 } 398 } 399 400 // Make sure networks are available before starting 401 daemon.waitForNetworks(c) 402 if err := daemon.containerStart(c, "", "", true); err != nil { 403 logrus.Errorf("Failed to start container %s: %s", c.ID, err) 404 } 405 close(chNotify) 406 }(c, notifier) 407 408 } 409 group.Wait() 410 411 removeGroup := sync.WaitGroup{} 412 for id := range removeContainers { 413 removeGroup.Add(1) 414 go func(cid string) { 415 if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { 416 logrus.Errorf("Failed to remove container %s: %s", cid, err) 417 } 418 removeGroup.Done() 419 }(id) 420 } 421 removeGroup.Wait() 422 423 // any containers that were started above would already have had this done, 424 // however we need to now prepare the mountpoints for the rest of the containers as well. 425 // This shouldn't cause any issue running on the containers that already had this run. 426 // This must be run after any containers with a restart policy so that containerized plugins 427 // can have a chance to be running before we try to initialize them. 428 for _, c := range containers { 429 // if the container has restart policy, do not 430 // prepare the mountpoints since it has been done on restarting. 431 // This is to speed up the daemon start when a restart container 432 // has a volume and the volume driver is not available. 433 if _, ok := restartContainers[c]; ok { 434 continue 435 } else if _, ok := removeContainers[c.ID]; ok { 436 // container is automatically removed, skip it. 437 continue 438 } 439 440 group.Add(1) 441 go func(c *container.Container) { 442 defer group.Done() 443 if err := daemon.prepareMountPoints(c); err != nil { 444 logrus.Error(err) 445 } 446 }(c) 447 } 448 449 group.Wait() 450 451 logrus.Info("Loading containers: done.") 452 453 return nil 454 } 455 456 // RestartSwarmContainers restarts any autostart container which has a 457 // swarm endpoint. 
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a
			// swarm endpoint now that the cluster is
			// initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}

	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the parent containers of the given container.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
		if err == container.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// DaemonJoinsCluster informs the daemon that it has joined the cluster and provides
// the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
	daemon.setClusterProvider(clusterProvider)
}

// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
	// Daemon is in charge of removing the attachable networks with
	// connected containers when the node leaves the swarm
	daemon.clearAttachableNetworks()
	// We no longer need the cluster provider, stop it now so that
	// the network agent will stop listening to cluster events.
	daemon.setClusterProvider(nil)
	// Wait for the networking cluster agent to stop
	daemon.netController.AgentStopWait()
	// Daemon is in charge of removing the ingress network when the
	// node leaves the swarm. Wait for the job to be done or timeout.
	// This is also called on graceful daemon shutdown. We need to
	// wait, because the ingress release has to happen before the
	// network controller is stopped.
	if done, err := daemon.ReleaseIngress(); err == nil {
		select {
		case <-done:
		case <-time.After(5 * time.Second):
			logrus.Warnf("timeout while waiting for ingress network removal")
		}
	} else {
		logrus.Warnf("failed to initiate ingress network removal: %v", err)
	}

	daemon.attachmentStore.ClearAttachments()
}

// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.IsSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
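	// (The bridge is considered disabled when the configured bridge interface
	// equals config.DisableNetworkBridge; see isBridgeNetworkDisabled below.)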
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	idMappings, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootIDs := idMappings.RootPair()
	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := prepareTempDir(config.Root, rootIDs)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := getRealPath(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	if runtime.GOOS == "windows" {
		if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
			if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
				return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
			}
		}
		os.Setenv("TEMP", realTmp)
		os.Setenv("TMP", realTmp)
	} else {
		os.Setenv("TMPDIR", realTmp)
	}

	d := &Daemon{
		configStore: config,
		PluginStore: pluginStore,
		startupDone: make(chan struct{}),
	}
	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setGenericResources(config); err != nil {
		return nil, err
	}
	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	if err := ensureDefaultAppArmorProfile(); err != nil {
		logrus.Errorf(err.Error())
	}

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
		return nil, err
	}

	// Create the directory where we'll store the runtime scripts (i.e. in
	// order to support runtimeArgs)
	daemonRuntimes := filepath.Join(config.Root, "runtimes")
	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
		return nil, err
	}
	if err := d.loadRuntimes(); err != nil {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
			return nil, err
		}
	}

	// On Windows we don't support the environment variable, or a user supplied graphdriver,
	// as Windows has no choice in terms of which graphdrivers to use.
	// It's a case of running Windows containers on Windows (windowsfilter) or
	// running Linux containers on Windows (lcow). Unix platforms, however, run
	// a single graphdriver for all containers, and it can be set through an
	// environment variable, a daemon start parameter, or chosen through
	// initialization of the layerstore through driver priority order, for example.
	d.stores = make(map[string]daemonStore)
	if runtime.GOOS == "windows" {
		d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"}
		if system.LCOWSupported() {
			d.stores["linux"] = daemonStore{graphDriver: "lcow"}
		}
	} else {
		driverName := os.Getenv("DOCKER_DRIVER")
		if driverName == "" {
			driverName = config.GraphDriver
		} else {
			logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
		}
		d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty. Layerstore init determines instead.
	}

	d.RegistryService = registryService
	logger.RegisterPluginGetter(d.PluginStore)

	metricsSockPath, err := d.listenMetricsSock()
	if err != nil {
		return nil, err
	}
	registerMetricsPluginCallback(d.PluginStore, metricsSockPath)

	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
		return pluginexec.New(getPluginExecRoot(config.Root), containerdRemote, m)
	}

	// Plugin system initialization should happen before restore. Do not change order.
	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
		Root:               filepath.Join(config.Root, "plugins"),
		ExecRoot:           getPluginExecRoot(config.Root),
		Store:              d.PluginStore,
		CreateExecutor:     createPluginExec,
		RegistryService:    registryService,
		LiveRestoreEnabled: config.LiveRestoreEnabled,
		LogPluginEvent:     d.LogPluginEvent, // todo: make private
		AuthzMiddleware:    config.AuthzMiddleware,
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create plugin manager")
	}

	var graphDrivers []string
	for operatingSystem, ds := range d.stores {
		ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
			StorePath:                 config.Root,
			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
			GraphDriver:               ds.graphDriver,
			GraphDriverOptions:        config.GraphOptions,
			IDMappings:                idMappings,
			PluginGetter:              d.PluginStore,
			ExperimentalEnabled:       config.Experimental,
			OS:                        operatingSystem,
		})
		if err != nil {
			return nil, err
		}
		ds.graphDriver = ls.DriverName() // As layerstore may set the driver
		ds.layerStore = ls
		d.stores[operatingSystem] = ds
		graphDrivers = append(graphDrivers, ls.DriverName())
	}

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDrivers); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	lsMap := make(map[string]layer.Store)
	for operatingSystem, ds := range d.stores {
		lsMap[operatingSystem] = ds.layerStore
	}
	d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
	for operatingSystem, ds := range d.stores {
		imageRoot := filepath.Join(config.Root, "image", ds.graphDriver)
		ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
		if err != nil {
			return nil, err
		}

		var is image.Store
		is, err = image.NewImageStore(ifs, operatingSystem, ds.layerStore)
		if err != nil {
			return nil, err
		}
		ds.imageRoot = imageRoot
		ds.imageStore = is
		d.stores[operatingSystem] = ds
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootIDs)
	if err != nil {
		return nil, err
	}

	trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
		return nil, err
	}

	eventsService := events.New()

	// We have a single tag/reference store for the daemon globally. However, it's
	// stored under the graphdriver. On host platforms which only support a single
	// container OS, but multiple selectable graphdrivers, this means depending on which
	// graphdriver is chosen, the global reference store is under there. For
	// platforms which support multiple container operating systems, this is slightly
	// more problematic as where does the global ref store get located? Fortunately,
	// for Windows, which is currently the only daemon supporting multiple container
	// operating systems, the list of graphdrivers available isn't user configurable.
	// For backwards compatibility, we just put it under the windowsfilter
	// directory regardless.
	refStoreLocation := filepath.Join(d.stores[runtime.GOOS].imageRoot, `repositories.json`)
	rs, err := refstore.NewReferenceStore(refStoreLocation)
	if err != nil {
		return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
	}
	d.referenceStore = rs

	for platform, ds := range d.stores {
		dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform)
		if err != nil {
			return nil, err
		}

		ds.distributionMetadataStore = dms
		d.stores[platform] = ds

		// No content-addressability migration on Windows as it never supported pre-CA
		if runtime.GOOS != "windows" {
			migrationStart := time.Now()
			if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil {
				logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
			}
			logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
		}
	}

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as it's read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check that the Devices cgroup is mounted; on Linux it is a hard
	// requirement for container security.
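	// Without the devices cgroup the daemon cannot restrict device access
	// inside containers, so refuse to start rather than run without it.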
868 if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { 869 return nil, errors.New("Devices cgroup isn't mounted") 870 } 871 872 d.ID = trustKey.PublicKey().KeyID() 873 d.repository = daemonRepo 874 d.containers = container.NewMemoryStore() 875 if d.containersReplica, err = container.NewViewDB(); err != nil { 876 return nil, err 877 } 878 d.execCommands = exec.NewStore() 879 d.trustKey = trustKey 880 d.idIndex = truncindex.NewTruncIndex([]string{}) 881 d.statsCollector = d.newStatsCollector(1 * time.Second) 882 d.defaultLogConfig = containertypes.LogConfig{ 883 Type: config.LogConfig.Type, 884 Config: config.LogConfig.Config, 885 } 886 d.EventsService = eventsService 887 d.volumes = volStore 888 d.root = config.Root 889 d.idMappings = idMappings 890 d.seccompEnabled = sysInfo.Seccomp 891 d.apparmorEnabled = sysInfo.AppArmor 892 d.containerdRemote = containerdRemote 893 894 d.linkIndex = newLinkIndex() 895 896 go d.execCommandGC() 897 898 d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d) 899 if err != nil { 900 return nil, err 901 } 902 903 if err := d.restore(); err != nil { 904 return nil, err 905 } 906 close(d.startupDone) 907 908 // FIXME: this method never returns an error 909 info, _ := d.SystemInfo() 910 911 engineInfo.WithValues( 912 dockerversion.Version, 913 dockerversion.GitCommit, 914 info.Architecture, 915 info.Driver, 916 info.KernelVersion, 917 info.OperatingSystem, 918 info.OSType, 919 info.ID, 920 ).Set(1) 921 engineCpus.Set(float64(info.NCPU)) 922 engineMemory.Set(float64(info.MemTotal)) 923 924 gd := "" 925 for platform, ds := range d.stores { 926 if len(gd) > 0 { 927 gd += ", " 928 } 929 gd += ds.graphDriver 930 if len(d.stores) > 1 { 931 gd = fmt.Sprintf("%s (%s)", gd, platform) 932 } 933 } 934 logrus.WithFields(logrus.Fields{ 935 "version": dockerversion.Version, 936 "commit": dockerversion.GitCommit, 937 "graphdriver(s)": gd, 938 }).Info("Docker daemon") 939 940 return d, nil 941 } 942 943 func (daemon *Daemon) waitForStartupDone() { 944 <-daemon.startupDone 945 } 946 947 func (daemon *Daemon) shutdownContainer(c *container.Container) error { 948 stopTimeout := c.StopTimeout() 949 950 // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force 951 if err := daemon.containerStop(c, stopTimeout); err != nil { 952 return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) 953 } 954 955 // Wait without timeout for the container to exit. 956 // Ignore the result. 957 <-c.Wait(context.Background(), container.WaitConditionNotRunning) 958 return nil 959 } 960 961 // ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, 962 // and is limited by daemon's ShutdownTimeout. 963 func (daemon *Daemon) ShutdownTimeout() int { 964 // By default we use daemon's ShutdownTimeout. 965 shutdownTimeout := daemon.configStore.ShutdownTimeout 966 967 graceTimeout := 5 968 if daemon.containers != nil { 969 for _, c := range daemon.containers.List() { 970 if shutdownTimeout >= 0 { 971 stopTimeout := c.StopTimeout() 972 if stopTimeout < 0 { 973 shutdownTimeout = -1 974 } else { 975 if stopTimeout+graceTimeout > shutdownTimeout { 976 shutdownTimeout = stopTimeout + graceTimeout 977 } 978 } 979 } 980 } 981 } 982 return shutdownTimeout 983 } 984 985 // Shutdown stops the daemon. 986 func (daemon *Daemon) Shutdown() error { 987 daemon.shutdown = true 988 // Keep mounts and networking running on daemon shutdown if 989 // we are to keep containers running and restore them. 

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers; if none, we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			// metrics plugins still need some cleanup
			daemon.cleanupMetricsPlugins()
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.stores[c.OS].layerStore.GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.volumes != nil {
		if err := daemon.volumes.Shutdown(); err != nil {
			logrus.Errorf("Error shutting down volume store: %v", err)
		}
	}

	for platform, ds := range daemon.stores {
		if ds.layerStore != nil {
			if err := ds.layerStore.Cleanup(); err != nil {
				logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform)
			}
		}
	}

	// If we are part of a cluster, clean up cluster's stuff
	if daemon.clusterProvider != nil {
		logrus.Debugf("start clean shutdown of cluster resources...")
		daemon.DaemonLeavesCluster()
	}

	daemon.cleanupMetricsPlugins()

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if err := daemon.cleanupMounts(); err != nil {
		return err
	}

	return nil
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	if container.RWLayer == nil {
		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
	}
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
1072 if runtime.GOOS != "windows" { 1073 daemon.Unmount(container) 1074 return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", 1075 daemon.GraphDriverName(container.OS), container.ID, container.BaseFS, dir) 1076 } 1077 } 1078 container.BaseFS = dir // TODO: combine these fields 1079 return nil 1080 } 1081 1082 // Unmount unsets the container base filesystem 1083 func (daemon *Daemon) Unmount(container *container.Container) error { 1084 if container.RWLayer == nil { 1085 return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") 1086 } 1087 if err := container.RWLayer.Unmount(); err != nil { 1088 logrus.Errorf("Error unmounting container %s: %s", container.ID, err) 1089 return err 1090 } 1091 1092 return nil 1093 } 1094 1095 // Subnets return the IPv4 and IPv6 subnets of networks that are manager by Docker. 1096 func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) { 1097 var v4Subnets []net.IPNet 1098 var v6Subnets []net.IPNet 1099 1100 managedNetworks := daemon.netController.Networks() 1101 1102 for _, managedNetwork := range managedNetworks { 1103 v4infos, v6infos := managedNetwork.Info().IpamInfo() 1104 for _, info := range v4infos { 1105 if info.IPAMData.Pool != nil { 1106 v4Subnets = append(v4Subnets, *info.IPAMData.Pool) 1107 } 1108 } 1109 for _, info := range v6infos { 1110 if info.IPAMData.Pool != nil { 1111 v6Subnets = append(v6Subnets, *info.IPAMData.Pool) 1112 } 1113 } 1114 } 1115 1116 return v4Subnets, v6Subnets 1117 } 1118 1119 // GraphDriverName returns the name of the graph driver used by the layer.Store 1120 func (daemon *Daemon) GraphDriverName(platform string) string { 1121 return daemon.stores[platform].layerStore.DriverName() 1122 } 1123 1124 // prepareTempDir prepares and returns the default directory to use 1125 // for temporary files. 1126 // If it doesn't exist, it is created. If it exists, its content is removed. 1127 func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) { 1128 var tmpDir string 1129 if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { 1130 tmpDir = filepath.Join(rootDir, "tmp") 1131 newName := tmpDir + "-old" 1132 if err := os.Rename(tmpDir, newName); err == nil { 1133 go func() { 1134 if err := os.RemoveAll(newName); err != nil { 1135 logrus.Warnf("failed to delete old tmp directory: %s", newName) 1136 } 1137 }() 1138 } else if !os.IsNotExist(err) { 1139 logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err) 1140 if err := os.RemoveAll(tmpDir); err != nil { 1141 logrus.Warnf("failed to delete old tmp directory: %s", tmpDir) 1142 } 1143 } 1144 } 1145 // We don't remove the content of tmpdir if it's not the default, 1146 // it may hold things that do not belong to us. 1147 return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs) 1148 } 1149 1150 func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error { 1151 rootIDs := daemon.idMappings.RootPair() 1152 return initlayer.Setup(initPath, rootIDs) 1153 } 1154 1155 func (daemon *Daemon) setGenericResources(conf *config.Config) error { 1156 genericResources, err := config.ParseGenericResources(conf.NodeGenericResources) 1157 if err != nil { 1158 return err 1159 } 1160 1161 daemon.genericResources = genericResources 1162 1163 return nil 1164 } 1165 1166 func setDefaultMtu(conf *config.Config) { 1167 // do nothing if the config does not have the default 0 value. 
	if conf.Mtu != 0 {
		return
	}
	conf.Mtu = config.DefaultNetworkMtu
}

func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootIDs)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, errors.New("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
	advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
	if err != nil {
		if err == discovery.ErrDiscoveryDisabled {
			return nil
		}
		return err
	}

	conf.ClusterAdvertise = advertise
	discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

func isBridgeNetworkDisabled(conf *config.Config) bool {
	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}

func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)
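
	// Hand the network sandboxes of containers kept alive for live restore
	// over to libnetwork so it can reconstruct their state.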
	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))

	return options, nil
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}

func (daemon *Daemon) pluginShutdown() {
	manager := daemon.pluginManager
	// Check for a valid manager object. In error conditions, daemon init can fail
	// and shutdown be called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
	return daemon.pluginManager
}

// PluginGetter returns the current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
	return daemon.PluginStore
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = getRealPath(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	idMappings, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	return setupDaemonRoot(config, realRoot, idMappings.RootPair())
}

// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
	container.Lock()
	defer container.Unlock()
	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		return fmt.Errorf("Error saving container state: %v", err)
	}
	return nil
}

// because the CLI sends a -1 when it wants to unset the swappiness value
// we need to clear it on the server side
func fixMemorySwappiness(resources *containertypes.Resources) {
	if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
		resources.MemorySwappiness = nil
	}
}

// GetAttachmentStore returns the current attachment store associated with the daemon
func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
	return &daemon.attachmentStore
}