github.com/gdevillele/moby@v1.13.0/daemon/daemon.go

// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	containerd "github.com/docker/containerd/api/grpc/types"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/events"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/daemon/initlayer"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/plugin"
	"github.com/docker/libnetwork/cluster"
	// register graph drivers
	_ "github.com/docker/docker/daemon/graphdriver/register"
	dmetadata "github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/libcontainerd"
	"github.com/docker/docker/migrate/v1"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/registrar"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/truncindex"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
	volumedrivers "github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/local"
	"github.com/docker/docker/volume/store"
	"github.com/docker/libnetwork"
	nwconfig "github.com/docker/libnetwork/config"
	"github.com/docker/libtrust"
	"github.com/pkg/errors"
)

var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	// DefaultInitBinary is the name of the default init binary
	DefaultInitBinary = "docker-init"

	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)

// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID                        string
	repository                string
	containers                container.Store
	execCommands              *exec.Store
	referenceStore            reference.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *Config
	statsCollector            *statsCollector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	discoveryWatcher          discoveryReloader
	root                      string
	seccompEnabled            bool
	shutdown                  bool
	uidMaps                   []idtools.IDMap
	gidMaps                   []idtools.IDMap
	layerStore                layer.Store
	imageStore                image.Store
	PluginStore               *plugin.Store // todo: remove
	pluginManager             *plugin.Manager
	nameIndex                 *registrar.Registrar
	linkIndex                 *linkIndex
	containerd                libcontainerd.Client
	containerdRemote          libcontainerd.Remote
	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
	clusterProvider           cluster.Provider
	cluster                   Cluster

	seccompProfile     []byte
	seccompProfilePath string
}

// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	if daemon.configStore != nil && daemon.configStore.Experimental {
		return true
	}
	return false
}

func (daemon *Daemon) restore() error {
	var (
		currentDriver = daemon.GraphDriverName()
		containers    = make(map[string]*container.Container)
	)

	logrus.Info("Loading containers: start.")

	dir, err := ioutil.ReadDir(daemon.repository)
	if err != nil {
		return err
	}

	for _, v := range dir {
		id := v.Name()
		container, err := daemon.load(id)
		if err != nil {
			logrus.Errorf("Failed to load container %v: %v", id, err)
			continue
		}

		// Ignore the container if it does not support the current driver being used by the graph
		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
			if err != nil {
				logrus.Errorf("Failed to load container mount %v: %v", id, err)
				continue
			}
			container.RWLayer = rwlayer
			logrus.Debugf("Loaded container %v", container.ID)

			containers[container.ID] = container
		} else {
			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
		}
	}

	removeContainers := make(map[string]*container.Container)
	restartContainers := make(map[*container.Container]chan struct{})
	activeSandboxes := make(map[string]interface{})
	for id, c := range containers {
		if err := daemon.registerName(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}
		if err := daemon.Register(c); err != nil {
			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
			delete(containers, id)
			continue
		}

		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
		if err := daemon.verifyVolumesInfo(c); err != nil {
			// don't skip the container due to error
			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
		}

		// The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
		// We should rewrite it to use the daemon defaults.
		// Fixes https://github.com/docker/docker/issues/22536
		if c.HostConfig.LogConfig.Type == "" {
			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
				continue
			}
		}
	}
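
	// The remainder of restore runs once per container, in parallel: live
	// containers are re-attached to containerd and their network sandboxes
	// recorded, stopped containers are queued for restart or removal, and
	// removals that were interrupted by a daemon crash are reset to "dead".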
	var migrateLegacyLinks bool // Not relevant on Windows
	var wg sync.WaitGroup
	var mapLock sync.Mutex
	for _, c := range containers {
		wg.Add(1)
		go func(c *container.Container) {
			defer wg.Done()
			if err := backportMountSpec(c); err != nil {
				logrus.Error("Failed to migrate old mounts to use new spec format")
			}

			if c.IsRunning() || c.IsPaused() {
				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
				if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
					return
				}

				// we call Mount and then Unmount to get BaseFs of the container
				if err := daemon.Mount(c); err != nil {
					// The mount is unlikely to fail. However, in case mount fails
					// the container should be allowed to restore here. Some functionalities
					// (like docker exec -u user) might be missing but the container is able to be
					// stopped/restarted/removed.
					// See #29365 for related information.
					// The error is only logged here.
					logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
				} else {
					// if the mount succeeded, unmount it again
					if err := daemon.Unmount(c); err != nil {
						logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
					}
				}

				c.ResetRestartManager(false)
				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
					options, err := daemon.buildSandboxOptions(c)
					if err != nil {
						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
					}
					mapLock.Lock()
					activeSandboxes[c.NetworkSettings.SandboxID] = options
					mapLock.Unlock()
				}

			}
			// fixme: only if not running
			// get list of containers we need to restart
			if !c.IsRunning() && !c.IsPaused() {
				// Do not autostart containers which
				// have endpoints in a swarm scope
				// network yet since the cluster is
				// not initialized yet. We will start
				// them after the cluster is
				// initialized.
				if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
					mapLock.Lock()
					restartContainers[c] = make(chan struct{})
					mapLock.Unlock()
				} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
					mapLock.Lock()
					removeContainers[c.ID] = c
					mapLock.Unlock()
				}
			}

			if c.RemovalInProgress {
				// We probably crashed in the middle of a removal, reset
				// the flag.
				//
				// We DO NOT remove the container here as we do not
				// know if the user had requested for either the
				// associated volumes, network links or both to also
				// be removed. So we put the container in the "dead"
				// state and leave further processing up to them.
				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
				c.ResetRemovalInProgress()
				c.SetDead()
				c.ToDisk()
			}

			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
			if c.HostConfig != nil && c.HostConfig.Links == nil {
				migrateLegacyLinks = true
			}
		}(c)
	}
	wg.Wait()
	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
	if err != nil {
		return fmt.Errorf("Error initializing network controller: %v", err)
	}

	// Perform migration of legacy sqlite links (no-op on Windows)
	if migrateLegacyLinks {
		if err := daemon.sqliteMigration(containers); err != nil {
			return err
		}
	}

	// Now that all the containers are registered, register the links
	for _, c := range containers {
		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
		}
	}
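
	// Restart the queued containers in parallel, but make each one wait (up
	// to 5 seconds) for the containers it links to (its children) to signal
	// that they have started, so dependencies come up in a sensible order.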
	group := sync.WaitGroup{}
	for c, notifier := range restartContainers {
		group.Add(1)

		go func(c *container.Container, chNotify chan struct{}) {
			defer group.Done()

			logrus.Debugf("Starting container %s", c.ID)

			// ignore errors here as this is a best effort to wait for children to be
			// running before we try to start the container
			children := daemon.children(c)
			timeout := time.After(5 * time.Second)
			for _, child := range children {
				if notifier, exists := restartContainers[child]; exists {
					select {
					case <-notifier:
					case <-timeout:
					}
				}
			}

			// Make sure networks are available before starting
			daemon.waitForNetworks(c)
			if err := daemon.containerStart(c, "", "", true); err != nil {
				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
			}
			close(chNotify)
		}(c, notifier)

	}
	group.Wait()

	removeGroup := sync.WaitGroup{}
	for id := range removeContainers {
		removeGroup.Add(1)
		go func(cid string) {
			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
				logrus.Errorf("Failed to remove container %s: %s", cid, err)
			}
			removeGroup.Done()
		}(id)
	}
	removeGroup.Wait()

	// any containers that were started above would already have had this done,
	// however we need to now prepare the mountpoints for the rest of the containers as well.
	// This shouldn't cause any issue running on the containers that already had this run.
	// This must be run after any containers with a restart policy so that containerized plugins
	// can have a chance to be running before we try to initialize them.
	for _, c := range containers {
		// if the container has a restart policy, do not
		// prepare the mountpoints since that has already been done on restart.
		// This speeds up daemon start when a restarting container
		// has a volume whose driver is not available.
		if _, ok := restartContainers[c]; ok {
			continue
		} else if _, ok := removeContainers[c.ID]; ok {
			// container is automatically removed, skip it.
			continue
		}

		group.Add(1)
		go func(c *container.Container) {
			defer group.Done()
			if err := daemon.prepareMountPoints(c); err != nil {
				logrus.Error(err)
			}
		}(c)
	}

	group.Wait()

	logrus.Info("Loading containers: done.")

	return nil
}

// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
	group := sync.WaitGroup{}
	for _, c := range daemon.List() {
		if !c.IsRunning() && !c.IsPaused() {
			// Autostart all the containers which have a
			// swarm endpoint now that the cluster is
			// initialized.
			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
				group.Add(1)
				go func(c *container.Container) {
					defer group.Done()
					if err := daemon.containerStart(c, "", "", true); err != nil {
						logrus.Error(err)
					}
				}(c)
			}
		}

	}
	group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			return
		}
	}
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.children(c)
}

// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
	return daemon.linkIndex.parents(c)
}
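
// registerLink reserves the child container's name under the parent's
// namespace and records the relationship in the link index. For example, a
// child linked into parent "/web" under the alias "db" is reserved as
// "/web/db". A name that is already reserved is logged and ignored rather
// than treated as a fatal error.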
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
	fullName := path.Join(parent.Name, alias)
	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
		if err == registrar.ErrNameReserved {
			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
			return nil
		}
		return err
	}
	daemon.linkIndex.link(parent, child, fullName)
	return nil
}

// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
	daemon.clusterProvider = clusterProvider
	daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	if daemon.configStore == nil {
		return nil
	}
	return daemon.configStore.isSwarmCompatible()
}
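
// A rough sketch of how NewDaemon is wired up by the dockerd entrypoint
// (hypothetical, simplified wiring; the real cmd/dockerd code also sets up
// the API server, TLS, and signal handling):
//
//	registryService := registry.NewService(config.ServiceOptions)
//	containerdRemote, err := libcontainerd.New(stateDir, opts...)
//	if err != nil { /* handle */ }
//	d, err := daemon.NewDaemon(config, registryService, containerdRemote)
//	if err != nil { /* handle */ }
//	defer d.Shutdown()
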
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
	setDefaultMtu(config)

	// Ensure that we have a correct root key limit for launching containers.
	if err := ModifyRootKeyLimit(); err != nil {
		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
	}

	// Ensure we have compatible and valid configuration options
	if err := verifyDaemonSettings(config); err != nil {
		return nil, err
	}

	// Do we have a disabled network?
	config.DisableBridge = isBridgeNetworkDisabled(config)

	// Verify the platform is supported as a daemon
	if !platformSupported {
		return nil, errSystemNotSupported
	}

	// Validate platform-specific requirements
	if err := checkSystem(); err != nil {
		return nil, err
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return nil, err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return nil, err
	}

	if err := setupDaemonProcess(config); err != nil {
		return nil, err
	}

	// set up the tmpDir to use a canonical path
	tmp, err := tempDir(config.Root, rootUID, rootGID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
	}
	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
	if err != nil {
		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
	}
	os.Setenv("TMPDIR", realTmp)

	d := &Daemon{configStore: config}
	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
		if err != nil {
			if err := d.Shutdown(); err != nil {
				logrus.Error(err)
			}
		}
	}()

	if err := d.setupSeccompProfile(); err != nil {
		return nil, err
	}

	// Set the default isolation mode (only applicable on Windows)
	if err := d.setDefaultIsolation(); err != nil {
		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
	}

	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

	if err := configureMaxThreads(config); err != nil {
		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
	}

	if err := ensureDefaultAppArmorProfile(); err != nil {
		logrus.Errorf(err.Error())
	}

	daemonRepo := filepath.Join(config.Root, "containers")
	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return nil, err
	}

	if runtime.GOOS == "windows" {
		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) {
			return nil, err
		}
	}

	driverName := os.Getenv("DOCKER_DRIVER")
	if driverName == "" {
		driverName = config.GraphDriver
	}

	d.RegistryService = registryService
	d.PluginStore = plugin.NewStore(config.Root) // todo: remove
	// Plugin system initialization should happen before restore. Do not change order.
	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
		Root:               filepath.Join(config.Root, "plugins"),
		ExecRoot:           "/run/docker/plugins", // possibly needs fixing
		Store:              d.PluginStore,
		Executor:           containerdRemote,
		RegistryService:    registryService,
		LiveRestoreEnabled: config.LiveRestoreEnabled,
		LogPluginEvent:     d.LogPluginEvent, // todo: make private
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create plugin manager")
	}

	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 config.Root,
		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
		GraphDriver:               driverName,
		GraphDriverOptions:        config.GraphOptions,
		UIDMaps:                   uidMaps,
		GIDMaps:                   gidMaps,
		PluginGetter:              d.PluginStore,
		ExperimentalEnabled:       config.Experimental,
	})
	if err != nil {
		return nil, err
	}

	graphDriver := d.layerStore.DriverName()
	imageRoot := filepath.Join(config.Root, "image", graphDriver)

	// Configure and validate the kernel's security support
	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
		return nil, err
	}

	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
	if err != nil {
		return nil, err
	}

	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
	if err != nil {
		return nil, err
	}

	// Configure the volumes driver
	volStore, err := d.configureVolumes(rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
	if err != nil {
		return nil, err
	}

	trustDir := filepath.Join(config.Root, "trust")

	if err := system.MkdirAll(trustDir, 0700); err != nil {
		return nil, err
	}

	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
	if err != nil {
		return nil, err
	}

	eventsService := events.New()

	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
	if err != nil {
		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
	}
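
	// Migrate pre-1.10 graph data (images, tags, and layer references) to
	// the content-addressable store. A failure here is logged but not fatal:
	// the daemon still starts with whatever data could be migrated.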
	migrationStart := time.Now()
	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
	}
	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

	// Discovery is only enabled when the daemon is launched with an address to advertise. When
	// it is initialized, the daemon is registered and we can store the discovery backend as read-only.
	if err := d.initDiscovery(config); err != nil {
		return nil, err
	}

	sysInfo := sysinfo.New(false)
	// Check if the Devices cgroup is mounted; it is a hard requirement for
	// container security on Linux.
	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
		return nil, fmt.Errorf("Devices cgroup isn't mounted")
	}

	d.ID = trustKey.PublicKey().KeyID()
	d.repository = daemonRepo
	d.containers = container.NewMemoryStore()
	d.execCommands = exec.NewStore()
	d.referenceStore = referenceStore
	d.distributionMetadataStore = distributionMetadataStore
	d.trustKey = trustKey
	d.idIndex = truncindex.NewTruncIndex([]string{})
	d.statsCollector = d.newStatsCollector(1 * time.Second)
	d.defaultLogConfig = containertypes.LogConfig{
		Type:   config.LogConfig.Type,
		Config: config.LogConfig.Config,
	}
	d.EventsService = eventsService
	d.volumes = volStore
	d.root = config.Root
	d.uidMaps = uidMaps
	d.gidMaps = gidMaps
	d.seccompEnabled = sysInfo.Seccomp

	d.nameIndex = registrar.NewRegistrar()
	d.linkIndex = newLinkIndex()
	d.containerdRemote = containerdRemote

	go d.execCommandGC()

	d.containerd, err = containerdRemote.Client(d)
	if err != nil {
		return nil, err
	}

	if err := d.restore(); err != nil {
		return nil, err
	}

	// FIXME: this method never returns an error
	info, _ := d.SystemInfo()

	engineVersion.WithValues(
		dockerversion.Version,
		dockerversion.GitCommit,
		info.Architecture,
		info.Driver,
		info.KernelVersion,
		info.OperatingSystem,
	).Set(1)
	engineCpus.Set(float64(info.NCPU))
	engineMemory.Set(float64(info.MemTotal))

	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
	// on Windows to dump Go routine stacks
	stackDumpDir := config.Root
	if execRoot := config.GetExecRoot(); execRoot != "" {
		stackDumpDir = execRoot
	}
	d.setupDumpStackTrap(stackDumpDir)

	return d, nil
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
	stopTimeout := c.StopTimeout()
	// TODO(windows): Handle docker restart with paused containers
	if c.IsPaused() {
		// To terminate a process in the freezer cgroup, we should send
		// SIGTERM to the process, then unfreeze it, and the process will
		// be forced to terminate immediately.
		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
		sig, ok := signal.SignalMap["TERM"]
		if !ok {
			return fmt.Errorf("System does not support SIGTERM")
		}
		if err := daemon.kill(c, int(sig)); err != nil {
			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
		}
		if err := daemon.containerUnpause(c); err != nil {
			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
		}
		if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
			logrus.Debugf("container %s failed to exit in %d seconds of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
			sig, ok := signal.SignalMap["KILL"]
			if !ok {
				return fmt.Errorf("System does not support SIGKILL")
			}
			if err := daemon.kill(c, int(sig)); err != nil {
				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
			}
			c.WaitStop(-1 * time.Second)
			return err
		}
	}
	// If the container failed to exit within stopTimeout seconds of SIGTERM, use the force
	if err := daemon.containerStop(c, stopTimeout); err != nil {
		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
	}

	c.WaitStop(-1 * time.Second)
	return nil
}

// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by the daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
	// By default we use the daemon's ShutdownTimeout.
	shutdownTimeout := daemon.configStore.ShutdownTimeout

	graceTimeout := 5
	if daemon.containers != nil {
		for _, c := range daemon.containers.List() {
			if shutdownTimeout >= 0 {
				stopTimeout := c.StopTimeout()
				if stopTimeout < 0 {
					shutdownTimeout = -1
				} else {
					if stopTimeout+graceTimeout > shutdownTimeout {
						shutdownTimeout = stopTimeout + graceTimeout
					}
				}
			}
		}
	}
	return shutdownTimeout
}
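
// For example, with the default 15-second daemon shutdown timeout, one
// running container with a 20-second stop timeout raises the effective
// value to 20+5 = 25 seconds, while any container with a negative stop
// timeout disables the limit (-1) altogether.
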
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
	daemon.shutdown = true
	// Keep mounts and networking running on daemon shutdown if
	// we are to keep containers running and restore them.

	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
		// check if there are any running containers; if none, we can proceed
		// with the cleanup below, otherwise leave everything in place
		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
			return nil
		}
	}

	if daemon.containers != nil {
		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
		daemon.containers.ApplyAll(func(c *container.Container) {
			if !c.IsRunning() {
				return
			}
			logrus.Debugf("stopping %s", c.ID)
			if err := daemon.shutdownContainer(c); err != nil {
				logrus.Errorf("Stop container error: %v", err)
				return
			}
			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
				daemon.cleanupMountsByID(mountid)
			}
			logrus.Debugf("container stopped %s", c.ID)
		})
	}

	if daemon.volumes != nil {
		if err := daemon.volumes.Shutdown(); err != nil {
			logrus.Errorf("Error shutting down volume store: %v", err)
		}
	}

	if daemon.layerStore != nil {
		if err := daemon.layerStore.Cleanup(); err != nil {
			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
		}
	}

	// Shutdown plugins after containers and layerstore. Don't change the order.
	daemon.pluginShutdown()

	// trigger libnetwork Stop only if it's initialized
	if daemon.netController != nil {
		daemon.netController.Stop()
	}

	if err := daemon.cleanupMounts(); err != nil {
		return err
	}

	return nil
}

// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time. This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}

// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
		return err
	}

	return nil
}

// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		v4Infos, _ := managedNetwork.Info().IpamInfo()
		for _, v4Info := range v4Infos {
			if v4Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v4Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
	var subnets []net.IPNet

	managedNetworks := daemon.netController.Networks()

	for _, managedNetwork := range managedNetworks {
		_, v6Infos := managedNetwork.Info().IpamInfo()
		for _, v6Info := range v6Infos {
			if v6Info.IPAMData.Pool != nil {
				subnets = append(subnets, *v6Info.IPAMData.Pool)
			}
		}
	}

	return subnets
}

// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
	return daemon.layerStore.DriverName()
}

// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
	return daemon.uidMaps, daemon.gidMaps
}

// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
	uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	return uid, gid
}
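
// For example, with a daemon running under "userns-remap" whose subordinate
// ID ranges start at 165536 (a typical /etc/subuid value), GetRemappedUIDGID
// returns (165536, 165536); with user namespaces disabled it returns (0, 0).
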
// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
	var tmpDir string
	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
	}
	return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
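
// With DOCKER_TMPDIR unset and the default root directory, tempDir resolves
// to /var/lib/docker/tmp and creates it owned by the (possibly remapped)
// root user.
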
func (daemon *Daemon) setupInitLayer(initPath string) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	return initlayer.Setup(initPath, rootUID, rootGID)
}

func setDefaultMtu(config *Config) {
	// do nothing if the config does not have the default 0 value.
	if config.Mtu != 0 {
		return
	}
	config.Mtu = defaultNetworkMtu
}

func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
	volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
	if err != nil {
		return nil, err
	}

	volumedrivers.RegisterPluginGetter(daemon.PluginStore)

	if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
		return nil, fmt.Errorf("local volume driver could not be registered")
	}
	return store.New(daemon.configStore.Root)
}

// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
	if err != nil {
		if err == errDiscoveryDisabled {
			return nil
		}
		return err
	}

	config.ClusterAdvertise = advertise
	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}

	daemon.discoveryWatcher = discoveryWatcher
	return nil
}
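
// A reload is typically triggered by sending SIGHUP to the daemon after
// editing its configuration file. A hypothetical /etc/docker/daemon.json
// touching only reloadable settings might look like:
//
//	{
//	    "labels": ["env=test"],
//	    "debug": true,
//	    "max-concurrent-downloads": 5,
//	    "shutdown-timeout": 30
//	}
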
// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels
// - Daemon debug log level
// - Daemon insecure registries
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Cluster discovery (reconfigure and restart)
// - Daemon live restore
// - Daemon shutdown timeout (in seconds)
func (daemon *Daemon) Reload(config *Config) (err error) {

	daemon.configStore.reloadLock.Lock()

	attributes := daemon.platformReload(config)

	defer func() {
		// we're unlocking here, because
		// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
		// holds that lock too.
		daemon.configStore.reloadLock.Unlock()
		if err == nil {
			daemon.LogDaemonEventWithAttributes("reload", attributes)
		}
	}()

	if err := daemon.reloadClusterDiscovery(config); err != nil {
		return err
	}

	if config.IsValueSet("labels") {
		daemon.configStore.Labels = config.Labels
	}
	if config.IsValueSet("debug") {
		daemon.configStore.Debug = config.Debug
	}
	if config.IsValueSet("insecure-registries") {
		daemon.configStore.InsecureRegistries = config.InsecureRegistries
		if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil {
			return err
		}
	}
	if config.IsValueSet("live-restore") {
		daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
			return err
		}
	}

	// If no value is set for max-concurrent-downloads we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
		*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
	} else {
		maxConcurrentDownloads := defaultMaxConcurrentDownloads
		daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
	}
	logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
	if daemon.downloadManager != nil {
		daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
	}

	// If no value is set for max-concurrent-uploads we assume it is the default value.
	// We always "reset" as the cost is lightweight and easy to maintain.
	if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
		*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
	} else {
		maxConcurrentUploads := defaultMaxConcurrentUploads
		daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
	}
	logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
	if daemon.uploadManager != nil {
		daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
	}

	if config.IsValueSet("shutdown-timeout") {
		daemon.configStore.ShutdownTimeout = config.ShutdownTimeout
		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
	}

	// We emit a daemon reload event here with the updatable configuration
	attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
	attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)

	if daemon.configStore.InsecureRegistries != nil {
		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
		if err != nil {
			return err
		}
		attributes["insecure-registries"] = string(insecureRegistries)
	} else {
		attributes["insecure-registries"] = "[]"
	}

	attributes["cluster-store"] = daemon.configStore.ClusterStore
	if daemon.configStore.ClusterOpts != nil {
		opts, err := json.Marshal(daemon.configStore.ClusterOpts)
		if err != nil {
			return err
		}
		attributes["cluster-store-opts"] = string(opts)
	} else {
		attributes["cluster-store-opts"] = "{}"
	}
	attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise

	if daemon.configStore.Labels != nil {
		labels, err := json.Marshal(daemon.configStore.Labels)
		if err != nil {
			return err
		}
		attributes["labels"] = string(labels)
	} else {
		attributes["labels"] = "[]"
	}

	attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
	attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
	attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)

	return nil
}
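
// reloadClusterDiscovery applies changes to the cluster-store,
// cluster-advertise, and cluster-opts settings. It starts the discovery
// watcher if discovery was previously disabled, stops it if the new settings
// disable it, reloads it otherwise, and finally pushes the resulting options
// into the network controller.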
func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
	var err error
	newAdvertise := daemon.configStore.ClusterAdvertise
	newClusterStore := daemon.configStore.ClusterStore
	if config.IsValueSet("cluster-advertise") {
		if config.IsValueSet("cluster-store") {
			newClusterStore = config.ClusterStore
		}
		newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
		if err != nil && err != errDiscoveryDisabled {
			return err
		}
	}

	if daemon.clusterProvider != nil {
		if err := config.isSwarmCompatible(); err != nil {
			return err
		}
	}

	// check discovery modifications
	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
		return nil
	}

	// enable discovery for the first time if it was not previously enabled
	if daemon.discoveryWatcher == nil {
		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
		if err != nil {
			return fmt.Errorf("discovery initialization failed (%v)", err)
		}
		daemon.discoveryWatcher = discoveryWatcher
	} else {
		if err == errDiscoveryDisabled {
			// disable discovery if it was previously enabled and it's disabled now
			daemon.discoveryWatcher.Stop()
		} else {
			// reload discovery
			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
				return err
			}
		}
	}

	daemon.configStore.ClusterStore = newClusterStore
	daemon.configStore.ClusterOpts = config.ClusterOpts
	daemon.configStore.ClusterAdvertise = newAdvertise

	if daemon.netController == nil {
		return nil
	}
	netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
	if err != nil {
		logrus.WithError(err).Warnf("failed to get options with network controller")
		return nil
	}
	err = daemon.netController.ReloadConfiguration(netOptions...)
	if err != nil {
		logrus.Warnf("Failed to reload configuration with network controller: %v", err)
	}

	return nil
}

func isBridgeNetworkDisabled(config *Config) bool {
	return config.bridgeConfig.Iface == disableNetworkBridge
}
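
// networkOptions translates the daemon configuration into libnetwork
// controller options: data and exec roots, the platform's default network and
// driver, the cluster KV store and discovery settings, labels, and any
// sandboxes that must be kept alive across a live restore.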
func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
	options := []nwconfig.Option{}
	if dconfig == nil {
		return options, nil
	}

	options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
	options = append(options, nwconfig.OptionDataDir(dconfig.Root))
	options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

	dd := runconfig.DefaultDaemonNetworkMode()
	dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
	options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
	options = append(options, nwconfig.OptionDefaultNetwork(dn))
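
	// The cluster store is expected in PROVIDER://URL form; for example,
	// "consul://10.0.0.5:8500" yields the "consul" KV provider with the
	// endpoint "10.0.0.5:8500".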
	if strings.TrimSpace(dconfig.ClusterStore) != "" {
		kv := strings.Split(dconfig.ClusterStore, "://")
		if len(kv) != 2 {
			return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
		}
		options = append(options, nwconfig.OptionKVProvider(kv[0]))
		options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
	}
	if len(dconfig.ClusterOpts) > 0 {
		options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
	}

	if daemon.discoveryWatcher != nil {
		options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
	}

	if dconfig.ClusterAdvertise != "" {
		options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
	}

	options = append(options, nwconfig.OptionLabels(dconfig.Labels))
	options = append(options, driverOptions(dconfig)...)

	if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
		options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
	}

	if pg != nil {
		options = append(options, nwconfig.OptionPluginGetter(pg))
	}

	return options, nil
}

func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	out := make([]types.BlkioStatEntry, len(entries))
	for i, re := range entries {
		out[i] = types.BlkioStatEntry{
			Major: re.Major,
			Minor: re.Minor,
			Op:    re.Op,
			Value: re.Value,
		}
	}
	return out
}

// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}

func (daemon *Daemon) pluginShutdown() {
	manager := daemon.pluginManager
	// Check for a valid manager object. In error conditions, daemon init can
	// fail and shutdown is called before the plugin manager is initialized.
	if manager != nil {
		manager.Shutdown()
	}
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
	return daemon.pluginManager
}

// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *Config) error {
	// get the canonical path to the Docker root directory
	var realRoot string
	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
		realRoot = config.Root
	} else {
		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return err
	}

	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
		return err
	}

	return nil
}