//go:build windows
// +build windows

package windows // import "github.com/docker/docker/daemon/graphdriver/windows"

import (
	"archive/tar"
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/backuptar"
	winiofs "github.com/Microsoft/go-winio/pkg/fs"
	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/hcsshim/osversion"
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/longpath"
	"github.com/docker/docker/pkg/reexec"
	units "github.com/docker/go-units"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

const (
	// filterDriver is an HCSShim driver type for the Windows Filter driver.
	filterDriver = 1
	// For WCOW, the default of 20GB hard-coded in the platform
	// is too small for builder scenarios where many users are
	// using RUN or COPY statements to install large amounts of data.
	// Use 127GB as that's the default size of a VHD in Hyper-V.
	defaultSandboxSize = "127GB"
)

var (
	// mutatedFiles is a list of files that are mutated by the import process
	// and must be backed up and restored. The key is the layer-relative path
	// of the mutated file; the value is the name of the backup copy stored in
	// the layer's root directory.
	mutatedFiles = map[string]string{
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD":      "bcd.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG":  "bcd.log.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
	}
	// noreexec disables the re-exec code path for layer import; set from the
	// DOCKER_WINDOWSFILTER_NOREEXEC environment variable in init().
	noreexec = false
)

// init registers the windows graph drivers to the register.
func init() {
	graphdriver.Register("windowsfilter", InitFilter)
	// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
	// debugging issues in the re-exec codepath significantly easier.
	if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
		logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
		noreexec = true
	} else {
		reexec.Register("docker-windows-write-layer", writeLayerReexec)
	}
}

// checker is a trivial graphdriver mount checker; on this driver a layer is
// never reported as mounted (IsMounted always returns false).
type checker struct {
}

func (c *checker) IsMounted(path string) bool {
	return false
}

// storageOptions holds per-layer storage settings parsed from "size=..."
// style options.
type storageOptions struct {
	size uint64 // requested sandbox (scratch VHD) size in bytes; 0 means unset
}

// Driver represents a windows graph driver.
type Driver struct {
	// info stores the shim driver information
	info hcsshim.DriverInfo
	// ctr reference-counts Get/Put calls per layer ID.
	ctr *graphdriver.RefCounter
	// it is safe for windows to use a cache here because it does not support
	// restoring containers when the daemon dies.
	cacheMu sync.Mutex
	// cache maps a resolved layer ID to its mount path while the layer is held.
	cache map[string]string
	// defaultStorageOpts holds the daemon-wide defaults applied when a layer
	// does not override them.
	defaultStorageOpts *storageOptions
}

// InitFilter returns a new Windows storage filter driver.
99 func InitFilter(home string, options []string, _ idtools.IdentityMapping) (graphdriver.Driver, error) { 100 logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) 101 102 fsType, err := winiofs.GetFileSystemType(home) 103 if err != nil { 104 return nil, err 105 } 106 if strings.ToLower(fsType) == "refs" { 107 return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) 108 } 109 110 if err := idtools.MkdirAllAndChown(home, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil { 111 return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) 112 } 113 114 storageOpt := make(map[string]string) 115 storageOpt["size"] = defaultSandboxSize 116 117 for _, v := range options { 118 opt := strings.SplitN(v, "=", 2) 119 storageOpt[strings.ToLower(opt[0])] = opt[1] 120 } 121 122 storageOptions, err := parseStorageOpt(storageOpt) 123 if err != nil { 124 return nil, fmt.Errorf("windowsfilter failed to parse default storage options - %s", err) 125 } 126 127 d := &Driver{ 128 info: hcsshim.DriverInfo{ 129 HomeDir: home, 130 Flavour: filterDriver, 131 }, 132 cache: make(map[string]string), 133 ctr: graphdriver.NewRefCounter(&checker{}), 134 defaultStorageOpts: storageOptions, 135 } 136 return d, nil 137 } 138 139 // String returns the string representation of a driver. This should match 140 // the name the graph driver has been registered with. 141 func (d *Driver) String() string { 142 return "windowsfilter" 143 } 144 145 // Status returns the status of the driver. 146 func (d *Driver) Status() [][2]string { 147 return [][2]string{ 148 {"Windows", ""}, 149 } 150 } 151 152 // Exists returns true if the given id is registered with this driver. 
153 func (d *Driver) Exists(id string) bool { 154 rID, err := d.resolveID(id) 155 if err != nil { 156 return false 157 } 158 result, err := hcsshim.LayerExists(d.info, rID) 159 if err != nil { 160 return false 161 } 162 return result 163 } 164 165 // CreateReadWrite creates a layer that is writable for use as a container 166 // file system. 167 func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { 168 if opts != nil { 169 return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) 170 } 171 return d.create(id, parent, "", false, nil) 172 } 173 174 // Create creates a new read-only layer with the given id. 175 func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { 176 if opts != nil { 177 return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) 178 } 179 return d.create(id, parent, "", true, nil) 180 } 181 182 func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { 183 rPId, err := d.resolveID(parent) 184 if err != nil { 185 return err 186 } 187 188 parentChain, err := d.getLayerChain(rPId) 189 if err != nil { 190 return err 191 } 192 193 var layerChain []string 194 195 if rPId != "" { 196 parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) 197 if err != nil { 198 return err 199 } 200 if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { 201 // This is a legitimate parent layer (not the empty "-init" layer), 202 // so include it in the layer chain. 203 layerChain = []string{parentPath} 204 } 205 } 206 207 layerChain = append(layerChain, parentChain...) 
208 209 if readOnly { 210 if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { 211 return err 212 } 213 } else { 214 var parentPath string 215 if len(layerChain) != 0 { 216 parentPath = layerChain[0] 217 } 218 219 if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { 220 return err 221 } 222 223 storageOptions, err := parseStorageOpt(storageOpt) 224 if err != nil { 225 return fmt.Errorf("Failed to parse storage options - %s", err) 226 } 227 228 sandboxSize := d.defaultStorageOpts.size 229 if storageOptions.size != 0 { 230 sandboxSize = storageOptions.size 231 } 232 233 if sandboxSize != 0 { 234 if err := hcsshim.ExpandSandboxSize(d.info, id, sandboxSize); err != nil { 235 return err 236 } 237 } 238 } 239 240 if _, err := os.Lstat(d.dir(parent)); err != nil { 241 if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { 242 logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) 243 } 244 return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) 245 } 246 247 if err := d.setLayerChain(id, layerChain); err != nil { 248 if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { 249 logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) 250 } 251 return err 252 } 253 254 return nil 255 } 256 257 // dir returns the absolute path to the layer. 258 func (d *Driver) dir(id string) string { 259 return filepath.Join(d.info.HomeDir, filepath.Base(id)) 260 } 261 262 // Remove unmounts and removes the dir information. 263 func (d *Driver) Remove(id string) error { 264 rID, err := d.resolveID(id) 265 if err != nil { 266 return err 267 } 268 269 // This retry loop is due to a bug in Windows (Internal bug #9432268) 270 // if GetContainers fails with ErrVmcomputeOperationInvalidState 271 // it is a transient error. Retry until it succeeds. 272 var computeSystems []hcsshim.ContainerProperties 273 retryCount := 0 274 for { 275 // Get and terminate any template VMs that are currently using the layer. 
276 // Note: It is unfortunate that we end up in the graphdrivers Remove() call 277 // for both containers and images, but the logic for template VMs is only 278 // needed for images - specifically we are looking to see if a base layer 279 // is in use by a template VM as a result of having started a Hyper-V 280 // container at some point. 281 // 282 // We have a retry loop for ErrVmcomputeOperationInvalidState and 283 // ErrVmcomputeOperationAccessIsDenied as there is a race condition 284 // in RS1 and RS2 building during enumeration when a silo is going away 285 // for example under it, in HCS. AccessIsDenied added to fix 30278. 286 // 287 // TODO: For RS3, we can remove the retries. Also consider 288 // using platform APIs (if available) to get this more succinctly. Also 289 // consider enhancing the Remove() interface to have context of why 290 // the remove is being called - that could improve efficiency by not 291 // enumerating compute systems during a remove of a container as it's 292 // not required. 
293 computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) 294 if err != nil { 295 if osversion.Build() >= osversion.RS3 { 296 return err 297 } 298 if (err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied) { 299 if retryCount >= 500 { 300 break 301 } 302 retryCount++ 303 time.Sleep(10 * time.Millisecond) 304 continue 305 } 306 return err 307 } 308 break 309 } 310 311 for _, computeSystem := range computeSystems { 312 if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { 313 container, err := hcsshim.OpenContainer(computeSystem.ID) 314 if err != nil { 315 return err 316 } 317 err = container.Terminate() 318 if hcsshim.IsPending(err) { 319 err = container.Wait() 320 } else if hcsshim.IsAlreadyStopped(err) { 321 err = nil 322 } 323 324 _ = container.Close() 325 if err != nil { 326 return err 327 } 328 } 329 } 330 331 layerPath := filepath.Join(d.info.HomeDir, rID) 332 tmpID := fmt.Sprintf("%s-removing", rID) 333 tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) 334 if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { 335 if !os.IsPermission(err) { 336 return err 337 } 338 // If permission denied, it's possible that the scratch is still mounted, an 339 // artifact after a hard daemon crash for example. Worth a shot to try detaching it 340 // before retrying the rename. 
341 sandbox := filepath.Join(layerPath, "sandbox.vhdx") 342 if _, statErr := os.Stat(sandbox); statErr == nil { 343 if detachErr := vhd.DetachVhd(sandbox); detachErr != nil { 344 return errors.Wrapf(err, "failed to detach VHD: %s", detachErr) 345 } 346 if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) { 347 return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr) 348 } 349 } 350 } 351 if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { 352 logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) 353 } 354 355 return nil 356 } 357 358 // GetLayerPath gets the layer path on host 359 func (d *Driver) GetLayerPath(id string) (string, error) { 360 return d.dir(id), nil 361 } 362 363 // Get returns the rootfs path for the id. This will mount the dir at its given path. 364 func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { 365 logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) 366 var dir string 367 368 rID, err := d.resolveID(id) 369 if err != nil { 370 return nil, err 371 } 372 if count := d.ctr.Increment(rID); count > 1 { 373 return containerfs.NewLocalContainerFS(d.cache[rID]), nil 374 } 375 376 // Getting the layer paths must be done outside of the lock. 
377 layerChain, err := d.getLayerChain(rID) 378 if err != nil { 379 d.ctr.Decrement(rID) 380 return nil, err 381 } 382 383 if err := hcsshim.ActivateLayer(d.info, rID); err != nil { 384 d.ctr.Decrement(rID) 385 return nil, err 386 } 387 if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { 388 d.ctr.Decrement(rID) 389 if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { 390 logrus.Warnf("Failed to Deactivate %s: %s", id, err) 391 } 392 return nil, err 393 } 394 395 mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) 396 if err != nil { 397 d.ctr.Decrement(rID) 398 if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { 399 logrus.Warnf("Failed to Unprepare %s: %s", id, err) 400 } 401 if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { 402 logrus.Warnf("Failed to Deactivate %s: %s", id, err) 403 } 404 return nil, err 405 } 406 d.cacheMu.Lock() 407 d.cache[rID] = mountPath 408 d.cacheMu.Unlock() 409 410 // If the layer has a mount path, use that. Otherwise, use the 411 // folder path. 412 if mountPath != "" { 413 dir = mountPath 414 } else { 415 dir = d.dir(id) 416 } 417 418 return containerfs.NewLocalContainerFS(dir), nil 419 } 420 421 // Put adds a new layer to the driver. 422 func (d *Driver) Put(id string) error { 423 logrus.Debugf("WindowsGraphDriver Put() id %s", id) 424 425 rID, err := d.resolveID(id) 426 if err != nil { 427 return err 428 } 429 if count := d.ctr.Decrement(rID); count > 0 { 430 return nil 431 } 432 d.cacheMu.Lock() 433 _, exists := d.cache[rID] 434 delete(d.cache, rID) 435 d.cacheMu.Unlock() 436 437 // If the cache was not populated, then the layer was left unprepared and deactivated 438 if !exists { 439 return nil 440 } 441 442 if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { 443 return err 444 } 445 return hcsshim.DeactivateLayer(d.info, rID) 446 } 447 448 // Cleanup ensures the information the driver stores is properly removed. 
449 // We use this opportunity to cleanup any -removing folders which may be 450 // still left if the daemon was killed while it was removing a layer. 451 func (d *Driver) Cleanup() error { 452 items, err := os.ReadDir(d.info.HomeDir) 453 if err != nil { 454 if os.IsNotExist(err) { 455 return nil 456 } 457 return err 458 } 459 460 // Note we don't return an error below - it's possible the files 461 // are locked. However, next time around after the daemon exits, 462 // we likely will be able to cleanup successfully. Instead we log 463 // warnings if there are errors. 464 for _, item := range items { 465 if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { 466 if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { 467 logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) 468 } else { 469 logrus.Infof("Cleaned up %s", item.Name()) 470 } 471 } 472 } 473 474 return nil 475 } 476 477 // Diff produces an archive of the changes between the specified 478 // layer and its parent layer which may be "". 479 // The layer should be mounted when calling this function 480 func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { 481 rID, err := d.resolveID(id) 482 if err != nil { 483 return 484 } 485 486 layerChain, err := d.getLayerChain(rID) 487 if err != nil { 488 return 489 } 490 491 // this is assuming that the layer is unmounted 492 if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { 493 return nil, err 494 } 495 prepare := func() { 496 if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { 497 logrus.Warnf("Failed to Deactivate %s: %s", rID, err) 498 } 499 } 500 501 arch, err := d.exportLayer(rID, layerChain) 502 if err != nil { 503 prepare() 504 return 505 } 506 return ioutils.NewReadCloserWrapper(arch, func() error { 507 err := arch.Close() 508 prepare() 509 return err 510 }), nil 511 } 512 513 // Changes produces a list of changes between the specified layer 514 // and its parent layer. 
If parent is "", then all changes will be ADD changes. 515 // The layer should not be mounted when calling this function. 516 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { 517 rID, err := d.resolveID(id) 518 if err != nil { 519 return nil, err 520 } 521 parentChain, err := d.getLayerChain(rID) 522 if err != nil { 523 return nil, err 524 } 525 526 if err := hcsshim.ActivateLayer(d.info, rID); err != nil { 527 return nil, err 528 } 529 defer func() { 530 if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { 531 logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) 532 } 533 }() 534 535 var changes []archive.Change 536 err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { 537 r, err := hcsshim.NewLayerReader(d.info, id, parentChain) 538 if err != nil { 539 return err 540 } 541 defer r.Close() 542 543 for { 544 name, _, fileInfo, err := r.Next() 545 if err == io.EOF { 546 return nil 547 } 548 if err != nil { 549 return err 550 } 551 name = filepath.ToSlash(name) 552 if fileInfo == nil { 553 changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) 554 } else { 555 // Currently there is no way to tell between an add and a modify. 556 changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) 557 } 558 } 559 }) 560 if err != nil { 561 return nil, err 562 } 563 564 return changes, nil 565 } 566 567 // ApplyDiff extracts the changeset from the given diff into the 568 // layer with the specified id and parent, returning the size of the 569 // new layer in bytes. 
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
	// Build the chain of layer paths the new layer sits on: the parent's own
	// mount path first, followed by the parent's recorded ancestor chain.
	var layerChain []string
	if parent != "" {
		rPId, err := d.resolveID(parent)
		if err != nil {
			return 0, err
		}
		parentChain, err := d.getLayerChain(rPId)
		if err != nil {
			return 0, err
		}
		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
		if err != nil {
			return 0, err
		}
		layerChain = append(layerChain, parentPath)
		layerChain = append(layerChain, parentChain...)
	}

	// Import the tar stream into a new platform layer, then persist the
	// chain so later operations can reconstruct it.
	size, err := d.importLayer(id, diff, layerChain)
	if err != nil {
		return 0, err
	}

	if err = d.setLayerChain(id, layerChain); err != nil {
		return 0, err
	}

	return size, nil
}

// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
	rPId, err := d.resolveID(parent)
	if err != nil {
		return
	}

	changes, err := d.Changes(id, rPId)
	if err != nil {
		return
	}

	// Mount the layer so the sizes of the changed paths can be stat'ed,
	// releasing it again when done.
	layerFs, err := d.Get(id, "")
	if err != nil {
		return
	}
	defer d.Put(id)

	return archive.ChangesSize(layerFs.Path(), changes), nil
}

// GetMetadata returns custom driver information.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
	m := make(map[string]string)
	m["dir"] = d.dir(id)
	return m, nil
}

// writeTarFromLayer streams the entries of an HCS layer reader into w as a
// tar archive. Deleted entries (nil fileInfo) are emitted as whiteout files;
// all other entries are converted from the Win32 backup stream format.
func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
	t := tar.NewWriter(w)
	for {
		name, size, fileInfo, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if fileInfo == nil {
			// Write a whiteout file.
			hdr := &tar.Header{
				Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))),
			}
			err := t.WriteHeader(hdr)
			if err != nil {
				return err
			}
		} else {
			err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
			if err != nil {
				return err
			}
		}
	}
	// Close flushes the tar footer; its error must not be dropped.
	return t.Close()
}

// exportLayer generates an archive from a layer based on the given ID.
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
	// The export runs in a goroutine writing into a pipe so the caller can
	// stream the archive; any error is delivered via CloseWithError.
	archive, w := io.Pipe()
	go func() {
		err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
			r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths)
			if err != nil {
				return err
			}

			err = writeTarFromLayer(r, w)
			cerr := r.Close()
			if err == nil {
				err = cerr
			}
			return err
		})
		w.CloseWithError(err)
	}()

	return archive, nil
}

// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
	var bcdBackup *os.File
	var bcdBackupWriter *winio.BackupFileWriter
	// If this entry is one of the files the import process mutates, tee the
	// backup stream into a pristine copy under root so DiffGetter can later
	// serve the original bytes.
	if backupPath, ok := mutatedFiles[hdr.Name]; ok {
		bcdBackup, err = os.Create(filepath.Join(root, backupPath))
		if err != nil {
			return nil, err
		}
		defer func() {
			cerr := bcdBackup.Close()
			if err == nil {
				err = cerr
			}
		}()

		bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
		defer func() {
			cerr := bcdBackupWriter.Close()
			if err == nil {
				err = cerr
			}
		}()

		buf.Reset(io.MultiWriter(w, bcdBackupWriter))
	} else {
		buf.Reset(w)
	}

	// Flush before the deferred Closes above run; a flush error must not be
	// masked by a successful return.
	defer func() {
		ferr := buf.Flush()
		if err == nil {
			err = ferr
		}
	}()

	return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}

// writeLayerFromTar consumes the tar stream r entry by entry, translating
// whiteouts into removals, hard links into AddLink calls, and regular
// entries into backup-stream writes. It returns the cumulative size of the
// file data written.
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
	t := tar.NewReader(r)
	hdr, err := t.Next()
	totalSize := int64(0)
	buf := bufio.NewWriter(nil)
	for err == nil {
		base := path.Base(hdr.Name)
		if strings.HasPrefix(base, archive.WhiteoutPrefix) {
			// Whiteout entry: remove the shadowed path from the layer.
			name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
			err = w.Remove(filepath.FromSlash(name))
			if err != nil {
				return 0, err
			}
			hdr, err = t.Next()
		} else if hdr.Typeflag == tar.TypeLink {
			err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
			if err != nil {
				return 0, err
			}
			hdr, err = t.Next()
		} else {
			var (
				name     string
				size     int64
				fileInfo *winio.FileBasicInfo
			)
			name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
			if err != nil {
				return 0, err
			}
			err = w.Add(filepath.FromSlash(name), fileInfo)
			if err != nil {
				return 0, err
			}
			// This also consumes any entries that belong to the same file
			// (e.g. alternate data streams) and returns the next header.
			hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
			totalSize += size
		}
	}
	if err != io.EOF {
		return 0, err
	}
	return totalSize, nil
}

// importLayer adds a new layer to the tag and graph store based on the given data.
func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
	if !noreexec {
		// Re-exec into a helper process so the backup/restore privileges are
		// enabled in an isolated process; the child prints the layer size on
		// stdout, which is parsed below.
		cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
		output := bytes.NewBuffer(nil)
		cmd.Stdin = layerData
		cmd.Stdout = output
		cmd.Stderr = output

		if err = cmd.Start(); err != nil {
			return
		}

		if err = cmd.Wait(); err != nil {
			return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
		}

		return strconv.ParseInt(output.String(), 10, 64)
	}
	return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
}

// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
func writeLayerReexec() {
	// Args: [0] command name, [1] home dir, [2] layer id, [3:] parent paths.
	size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Fprint(os.Stdout, size)
}

// writeLayer writes a layer from a tar file.
func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (size int64, retErr error) {
	err := winio.EnableProcessPrivileges([]string{winio.SeSecurityPrivilege, winio.SeBackupPrivilege, winio.SeRestorePrivilege})
	if err != nil {
		return 0, err
	}
	// In re-exec mode the process exits afterwards, so privileges only need
	// to be dropped again when running inline in the daemon process.
	if noreexec {
		defer func() {
			if err := winio.DisableProcessPrivileges([]string{winio.SeSecurityPrivilege, winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
				// This should never happen, but just in case when in debugging mode.
				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
				panic("Failed to disabled process privileges while in non re-exec mode")
			}
		}()
	}

	info := hcsshim.DriverInfo{
		Flavour: filterDriver,
		HomeDir: home,
	}

	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
	if err != nil {
		return 0, err
	}

	defer func() {
		if err := w.Close(); err != nil {
			// This error should not be discarded as a failure here
			// could result in an invalid layer on disk
			if retErr == nil {
				retErr = err
			}
		}
	}()

	return writeLayerFromTar(layerData, w, filepath.Join(home, id))
}

// resolveID computes the layerID information based on the given id.
// If no layerID file exists, the id itself is the layer ID.
func (d *Driver) resolveID(id string) (string, error) {
	content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
	if os.IsNotExist(err) {
		return id, nil
	} else if err != nil {
		return "", err
	}
	return string(content), nil
}

// setID stores the layerId in disk.
// NOTE(review): this writes "layerId" while resolveID reads "layerID" —
// presumably harmless on a case-insensitive Windows volume, but the case
// mismatch looks unintentional; confirm before relying on it.
func (d *Driver) setID(id, altID string) error {
	return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}

// getLayerChain returns the layer chain information.
// A missing layerchain.json yields a nil chain with no error.
func (d *Driver) getLayerChain(id string) ([]string, error) {
	jPath := filepath.Join(d.dir(id), "layerchain.json")
	content, err := os.ReadFile(jPath)
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
	}

	var layerChain []string
	err = json.Unmarshal(content, &layerChain)
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err)
	}

	return layerChain, nil
}

// setLayerChain stores the layer chain information in disk.
func (d *Driver) setLayerChain(id string, chain []string) error {
	content, err := json.Marshal(&chain)
	if err != nil {
		return fmt.Errorf("Failed to marshall layerchain json - %s", err)
	}

	jPath := filepath.Join(d.dir(id), "layerchain.json")
	err = os.WriteFile(jPath, content, 0600)
	if err != nil {
		return fmt.Errorf("Unable to write layerchain file - %s", err)
	}

	return nil
}

// fileGetCloserWithBackupPrivileges serves individual files out of a layer
// directory, opening them with the Windows backup privilege so ACLs do not
// block access.
type fileGetCloserWithBackupPrivileges struct {
	path string // root directory of the layer
}

// Get opens the named file within the layer. Files listed in mutatedFiles
// are served from their pristine backup copy instead of the (mutated)
// on-disk version.
func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
	if backupPath, ok := mutatedFiles[filename]; ok {
		return os.Open(filepath.Join(fg.path, backupPath))
	}

	var f *os.File
	// Open the file while holding the Windows backup privilege. This ensures that the
	// file can be opened even if the caller does not actually have access to it according
	// to the security descriptor. Also use sequential file access to avoid depleting the
	// standby list - Microsoft VSO Bug Tracker #9900466
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
		p, err := windows.UTF16FromString(path)
		if err != nil {
			return err
		}
		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
		h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
		if err != nil {
			return &os.PathError{Op: "open", Path: path, Err: err}
		}
		// Wrap the raw handle in an *os.File; the caller is responsible for
		// closing the returned ReadCloser.
		f = os.NewFile(uintptr(h), path)
		return nil
	})
	return f, err
}

// Close is a no-op; the getter holds no resources of its own.
func (fg *fileGetCloserWithBackupPrivileges) Close() error {
	return nil
}

// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
928 func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { 929 id, err := d.resolveID(id) 930 if err != nil { 931 return nil, err 932 } 933 934 return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil 935 } 936 937 func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { 938 options := storageOptions{} 939 940 // Read size to change the block device size per container. 941 for key, val := range storageOpt { 942 key := strings.ToLower(key) 943 switch key { 944 case "size": 945 size, err := units.RAMInBytes(val) 946 if err != nil { 947 return nil, err 948 } 949 options.size = uint64(size) 950 } 951 } 952 return &options, nil 953 }