gopkg.in/docker/docker.v1@v1.13.1/daemon/daemon.go

// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	containerd "github.com/docker/containerd/api/grpc/types"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/daemon/initlayer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/plugin"
	"github.com/docker/libnetwork/cluster"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/registrar"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
	"github.com/pkg/errors"
)

var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	// DefaultInitBinary is the name of the default init binary
	DefaultInitBinary = "docker-init"

	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                        string
	repository                string
	containers                container.Store
	execCommands              *exec.Store
	referenceStore            reference.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *Config
	statsCollector            *statsCollector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	discoveryWatcher          discoveryReloader
	root                      string
	seccompEnabled            bool
	shutdown                  bool
	uidMaps                   []idtools.IDMap
	gidMaps                   []idtools.IDMap
	layerStore                layer.Store
	imageStore                image.Store
	PluginStore               *plugin.Store // todo: remove
	pluginManager             *plugin.Manager
	nameIndex                 *registrar.Registrar
	linkIndex                 *linkIndex
	containerd                libcontainerd.Client
	containerdRemote          libcontainerd.Remote
	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
	clusterProvider           cluster.Provider
	cluster                   Cluster

	seccompProfile     []byte
	seccompProfilePath string
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	if daemon.configStore != nil && daemon.configStore.Experimental {
		return true
	}
	return false
}

func (daemon *Daemon) restore() error {
	var (
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
		if err := daemon.verifyVolumesInfo(c); err != nil {
			// don't skip the container due to error
			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}

	var migrateLegacyLinks bool // Not relevant on Windows
	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			if err := backportMountSpec(c); err != nil {
				logrus.Error("Failed to migrate old mounts to use new spec format")
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
				if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
					return
				}

				// we call Mount and then Unmount to get the BaseFs of the container
				if err := daemon.Mount(c); err != nil {
					// The mount is unlikely to fail. However, in case mount fails
					// the container should be allowed to restore here. Some functionality
					// (like docker exec -u user) might be missing but the container is able to be
					// stopped/restarted/removed.
					// See #29365 for related information.
					// The error is only logged here.
					logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
				} else {
					// if the mount succeeded, unmount it again
					if err := daemon.Unmount(c); err != nil {
						logrus.Warnf("Failed to unmount container on getting BaseFs path %v: %v", c.ID, err)
					}
				}

				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}

			}
			// fixme: only if not running
			// get list of containers we need to restart
			if !c.IsRunning() && !c.IsPaused() {
				// Do not autostart containers which have endpoints
				// in a swarm-scope network yet, since the cluster is
				// not initialized. We will start them after the
				// cluster is initialized.
				if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
					mapLock.Lock()
					restartContainers[c] = make(chan struct{})
					mapLock.Unlock()
				} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
					mapLock.Lock()
					removeContainers[c.ID] = c
					mapLock.Unlock()
				}
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.ResetRemovalInProgress()
				c.SetDead()
				c.ToDisk()
			}

			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				migrateLegacyLinks = true
			}
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// Perform migration of legacy sqlite links (no-op on Windows)
	if migrateLegacyLinks {
		if err := daemon.sqliteMigration(containers); err != nil {
			return err
		}
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}

	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
			removeGroup.Done()
		}(id)
	}
	removeGroup.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has a restart policy, do not
		// prepare the mountpoints since it has been done on restarting.
		// This is to speed up the daemon start when a restarting container
		// has a volume and the volume driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}

// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a
			// swarm endpoint now that the cluster is
			// initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}

	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure that if the container has a network that requires discovery, the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

// children returns the child containers linked to the given container.
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}

func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
		if err == registrar.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// DaemonJoinsCluster informs the daemon that it has joined the cluster and provides
// the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
	daemon.setClusterProvider(clusterProvider)
}

// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
	// Daemon is in charge of removing the attachable networks with
	// connected containers when the node leaves the swarm
	daemon.clearAttachableNetworks()
	daemon.setClusterProvider(nil)
}

// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	// call this in a goroutine to allow the netController to handle this event
	// asynchronously and not block if it is in the middle of talking with the cluster
	go daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.isSwarmCompatible()
}

// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{configStore: config}
	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	if err := ensureDefaultAppArmorProfile(); err != nil {
		logrus.Errorf(err.Error())
	}

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) {
			return nil, err
		}
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}

	d.RegistryService = registryService
	d.PluginStore = plugin.NewStore(config.Root) // todo: remove
	// Plugin system initialization should happen before restore. Do not change order.
	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
		Root:               filepath.Join(config.Root, "plugins"),
		ExecRoot:           "/run/docker/plugins", // possibly needs fixing
		Store:              d.PluginStore,
		Executor:           containerdRemote,
		RegistryService:    registryService,
		LiveRestoreEnabled: config.LiveRestoreEnabled,
		LogPluginEvent:     d.LogPluginEvent, // todo: make private
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create plugin manager")
	}

	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver:               driverName,
		GraphDriverOptions:        config.GraphOptions,
		UIDMaps:                   uidMaps,
		GIDMaps:                   gidMaps,
		PluginGetter:              d.PluginStore,
		ExperimentalEnabled:       config.Experimental,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700); err != nil {
		return nil, err
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	eventsService := events.New()

	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
	}

	migrationStart := time.Now()
	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
	}
	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// initialized, the daemon is registered and we can store the discovery backend as read-only
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if the Devices cgroup is mounted; it is a hard requirement
	// for container security on Linux.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()
	d.containerdRemote = containerdRemote

	go d.execCommandGC()

	d.containerd, err = containerdRemote.Client(d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}

	// FIXME: this method never returns an error
	info, _ := d.SystemInfo()

	engineVersion.WithValues(
		dockerversion.Version,
		dockerversion.GitCommit,
		info.Architecture,
		info.Driver,
		info.KernelVersion,
		info.OperatingSystem,
	).Set(1)
	engineCpus.Set(float64(info.NCPU))
	engineMemory.Set(float64(info.MemTotal))

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	return d, nil
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()
	// TODO(windows): Handle docker restart with paused containers
	if c.IsPaused() {
		// To terminate a process in the freezer cgroup, we should send
		// SIGTERM to the process and then unfreeze it; the process will
		// then be forced to terminate immediately.
		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
		sig, ok := signal.SignalMap["TERM"]
		if !ok {
			return fmt.Errorf("System does not support SIGTERM")
		}
		if err := daemon.kill(c, int(sig)); err != nil {
			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
		}
		if err := daemon.containerUnpause(c); err != nil {
			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
		}
		if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
			logrus.Debugf("container %s failed to exit in %d seconds of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
			sig, ok := signal.SignalMap["KILL"]
			if !ok {
				return fmt.Errorf("System does not support SIGKILL")
			}
			if err := daemon.kill(c, int(sig)); err != nil {
				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
			}
			c.WaitStop(-1 * time.Second)
			return err
		}
	}
	// If the container did not exit within stopTimeout seconds after SIGTERM, force-stop it
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	c.WaitStop(-1 * time.Second)
	return nil
}

// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
	// By default we use daemon's ShutdownTimeout.
	shutdownTimeout := daemon.configStore.ShutdownTimeout

	graceTimeout := 5
	if daemon.containers != nil {
		for _, c := range daemon.containers.List() {
			if shutdownTimeout >= 0 {
				stopTimeout := c.StopTimeout()
				if stopTimeout < 0 {
					shutdownTimeout = -1
				} else {
					if stopTimeout+graceTimeout > shutdownTimeout {
						shutdownTimeout = stopTimeout + graceTimeout
					}
				}
			}
		}
	}
	return shutdownTimeout
}
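// For illustration (not part of the original source): with the daemon's
// shutdown-timeout set to 15 and two containers whose stop timeouts are 10
// and 20, ShutdownTimeout returns 25 (20 plus the 5-second grace period).
// If any container has a negative stop timeout, it returns -1, meaning
// "wait indefinitely".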

// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers, if none we should do some cleanup
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.volumes != nil {
		if err := daemon.volumes.Shutdown(); err != nil {
			logrus.Errorf("Error shutting down volume store: %v", err)
		}
	}

	if daemon.layerStore != nil {
		if err := daemon.layerStore.Cleanup(); err != nil {
			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
		}
	}

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if err := daemon.cleanupMounts(); err != nil {
		return err
	}

	return nil
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}

	return nil
}

// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4Infos, _ := managedNetwork.Info().IpamInfo()
		for _, v4Info := range v4Infos {
			if v4Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v4Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		_, v6Infos := managedNetwork.Info().IpamInfo()
		for _, v6Info := range v6Infos {
			if v6Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v6Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
	return daemon.layerStore.DriverName()
}

// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
	return daemon.uidMaps, daemon.gidMaps
}

// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	return uid, gid
}
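// For illustration (not part of the original source): with user-namespace
// remapping enabled and container root mapped to host UID/GID 100000 (a
// typical /etc/subuid and /etc/subgid setup), GetRemappedUIDGID returns
// (100000, 100000); without remapping it returns (0, 0).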
// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
	}
	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}

func (daemon *Daemon) setupInitLayer(initPath string) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	return initlayer.Setup(initPath, rootUID, rootGID)
}

func setDefaultMtu(config *Config) {
	// do nothing if the config does not have the default 0 value.
	if config.Mtu != 0 {
		return
	}
	config.Mtu = defaultNetworkMtu
}

func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, fmt.Errorf("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
	if err != nil {
		if err == errDiscoveryDisabled {
			return nil
		}
		return err
	}

	config.ClusterAdvertise = advertise
	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}

// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels.
// - Daemon debug log level.
// - Daemon insecure registries.
// - Daemon max concurrent downloads.
// - Daemon max concurrent uploads.
// - Cluster discovery (reconfigure and restart).
// - Daemon live restore.
// - Daemon shutdown timeout (in seconds).
func (daemon *Daemon) Reload(config *Config) (err error) {

	daemon.configStore.reloadLock.Lock()

	attributes := daemon.platformReload(config)

	defer func() {
		// we're unlocking here, because
		// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
		// holds that lock too.
		daemon.configStore.reloadLock.Unlock()
		if err == nil {
			daemon.LogDaemonEventWithAttributes("reload", attributes)
		}
	}()

	if err := daemon.reloadClusterDiscovery(config); err != nil {
		return err
	}

	if config.IsValueSet("labels") {
		daemon.configStore.Labels = config.Labels
	}
	if config.IsValueSet("debug") {
		daemon.configStore.Debug = config.Debug
	}
	if config.IsValueSet("insecure-registries") {
		daemon.configStore.InsecureRegistries = config.InsecureRegistries
		if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil {
			return err
		}
	}
	if config.IsValueSet("live-restore") {
		daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
			return err
		}
	}

	// If no value is set for max-concurrent-downloads we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
	} else {
		maxConcurrentDownloads := defaultMaxConcurrentDownloads
		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
	}
	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
	if daemon.downloadManager != nil {
		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
	}

	// If no value is set for max-concurrent-uploads we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
	} else {
		maxConcurrentUploads := defaultMaxConcurrentUploads
		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
	}
	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
	if daemon.uploadManager != nil {
		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
	}

	if config.IsValueSet("shutdown-timeout") {
		daemon.configStore.ShutdownTimeout = config.ShutdownTimeout
		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
	}

	// We emit a daemon reload event here with the updatable configuration
	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
	attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)

	if daemon.configStore.InsecureRegistries != nil {
		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
		if err != nil {
			return err
		}
		attributes["insecure-registries"] = string(insecureRegistries)
	} else {
		attributes["insecure-registries"] = "[]"
	}

	attributes["cluster-store"] = daemon.configStore.ClusterStore
	if daemon.configStore.ClusterOpts != nil {
		opts, err := json.Marshal(daemon.configStore.ClusterOpts)
		if err != nil {
			return err
		}
		attributes["cluster-store-opts"] = string(opts)
	} else {
		attributes["cluster-store-opts"] = "{}"
	}
	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise

	if daemon.configStore.Labels != nil {
		labels, err := json.Marshal(daemon.configStore.Labels)
		if err != nil {
			return err
		}
		attributes["labels"] = string(labels)
	} else {
		attributes["labels"] = "[]"
	}

	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
	attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)

	return nil
}

func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
	var err error
	newAdvertise := daemon.configStore.ClusterAdvertise
	newClusterStore := daemon.configStore.ClusterStore
	if config.IsValueSet("cluster-advertise") {
		if config.IsValueSet("cluster-store") {
			newClusterStore = config.ClusterStore
		}
		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
		if err != nil && err != errDiscoveryDisabled {
			return err
		}
	}

	if daemon.clusterProvider != nil {
		if err := config.isSwarmCompatible(); err != nil {
			return err
		}
	}

	// check discovery modifications
	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
		return nil
	}

	// enable discovery for the first time if it was not previously enabled
	if daemon.discoveryWatcher == nil {
		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
		if err != nil {
			return fmt.Errorf("discovery initialization failed (%v)", err)
		}
		daemon.discoveryWatcher = discoveryWatcher
	} else {
		if err == errDiscoveryDisabled {
			// disable discovery if it was previously enabled and it's disabled now
			daemon.discoveryWatcher.Stop()
		} else {
			// reload discovery
			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
				return err
			}
		}
	}

	daemon.configStore.ClusterStore = newClusterStore
	daemon.configStore.ClusterOpts = config.ClusterOpts
	daemon.configStore.ClusterAdvertise = newAdvertise

	if daemon.netController == nil {
		return nil
	}
	netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
	if err != nil {
		logrus.WithError(err).Warnf("failed to get options with network controller")
		return nil
	}
	err = daemon.netController.ReloadConfiguration(netOptions...)
	if err != nil {
		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
	}

	return nil
}

func isBridgeNetworkDisabled(config *Config) bool {
	return config.bridgeConfig.Iface == disableNetworkBridge
}

func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))

	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	return options, nil
}

func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	out := make([]types.BlkioStatEntry, len(entries))
	for i, re := range entries {
		out[i] = types.BlkioStatEntry{
			Major: re.Major,
			Minor: re.Minor,
			Op:    re.Op,
			Value: re.Value,
		}
	}
	return out
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}

func (daemon *Daemon) pluginShutdown() {
	manager := daemon.pluginManager
	// Check for a valid manager object. In error conditions, daemon init can fail
	// and shutdown can be called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
	return daemon.pluginManager
}

// PluginGetter returns the current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
	return daemon.PluginStore
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return err
	}

	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return err
	}

	return nil
}
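// Illustrative lifecycle sketch (not part of the original source): a caller
// such as cmd/dockerd is expected to create the daemon root, construct the
// daemon, and shut it down roughly as follows. The registryService,
// containerdRemote, and updatedCfg values are assumed to be built elsewhere
// by the caller; only Config, CreateDaemonRoot, NewDaemon, Reload, and
// Shutdown come from this package.
//
//	cfg := &daemon.Config{} // populated from flags and/or daemon.json
//	if err := daemon.CreateDaemonRoot(cfg); err != nil {
//		logrus.Fatal(err)
//	}
//	d, err := daemon.NewDaemon(cfg, registryService, containerdRemote)
//	if err != nil {
//		logrus.Fatal(err)
//	}
//	defer func() {
//		if err := d.Shutdown(); err != nil {
//			logrus.Error(err)
//		}
//	}()
//
//	// Selected settings can later be reloaded without a restart, e.g. on
//	// SIGHUP, by passing an updated configuration to Reload.
//	if err := d.Reload(updatedCfg); err != nil {
//		logrus.Error(err)
//	}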