//go:build windows
// +build windows

package windows // import "github.com/docker/docker/daemon/graphdriver/windows"

import (
	"archive/tar"
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/backuptar"
	winiofs "github.com/Microsoft/go-winio/pkg/fs"
	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/hcsshim/osversion"
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/longpath"
	"github.com/docker/docker/pkg/reexec"
	units "github.com/docker/go-units"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

const (
	// filterDriver is an HCSShim driver type for the Windows Filter driver.
	filterDriver = 1
	// For WCOW, the default of 20GB hard-coded in the platform
	// is too small for builder scenarios where many users are
	// using RUN or COPY statements to install large amounts of data.
	// Use 127GB as that's the default size of a VHD in Hyper-V.
	defaultSandboxSize = "127GB"
)

var (
	// mutatedFiles is a list of files that are mutated by the import process
	// and must be backed up and restored.
	mutatedFiles = map[string]string{
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD":      "bcd.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG":  "bcd.log.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
	}
	noreexec = false
)

// init registers the windowsfilter graph driver with the graphdriver registry.
func init() {
	graphdriver.Register("windowsfilter", InitFilter)
	// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
	// debugging issues in the re-exec codepath significantly easier.
	if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
		logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
		noreexec = true
	} else {
		reexec.Register("docker-windows-write-layer", writeLayerReexec)
	}
}

type checker struct {
}

func (c *checker) IsMounted(path string) bool {
	return false
}

type storageOptions struct {
	size uint64
}

// Driver represents a Windows graph driver.
type Driver struct {
	// info stores the shim driver information
	info hcsshim.DriverInfo
	ctr  *graphdriver.RefCounter
	// It is safe for Windows to use a cache here because it does not support
	// restoring containers when the daemon dies.
	cacheMu            sync.Mutex
	cache              map[string]string
	defaultStorageOpts *storageOptions
}
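
// exampleInitFilter is an illustrative sketch only and is not called by the
// daemon: it shows how "key=value" storage options reach InitFilter and
// override defaultSandboxSize. The home path and option value are hypothetical
// example values.
func exampleInitFilter() (graphdriver.Driver, error) {
	opts := []string{"size=200GB"} // keys are lower-cased and parsed by parseStorageOpt
	return InitFilter(`C:\ProgramData\docker\windowsfilter`, opts, idtools.IdentityMapping{})
}
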
// InitFilter returns a new Windows storage filter driver.
func InitFilter(home string, options []string, _ idtools.IdentityMapping) (graphdriver.Driver, error) {
	logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)

	fsType, err := winiofs.GetFileSystemType(home)
	if err != nil {
		return nil, err
	}
	if strings.ToLower(fsType) == "refs" {
		return nil, fmt.Errorf("%s is on a ReFS volume - ReFS volumes are not supported", home)
	}

	if err := idtools.MkdirAllAndChown(home, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
		return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
	}

	storageOpt := make(map[string]string)
	storageOpt["size"] = defaultSandboxSize

	for _, v := range options {
		opt := strings.SplitN(v, "=", 2)
		storageOpt[strings.ToLower(opt[0])] = opt[1]
	}

	storageOptions, err := parseStorageOpt(storageOpt)
	if err != nil {
		return nil, fmt.Errorf("windowsfilter failed to parse default storage options - %s", err)
	}

	d := &Driver{
		info: hcsshim.DriverInfo{
			HomeDir: home,
			Flavour: filterDriver,
		},
		cache:              make(map[string]string),
		ctr:                graphdriver.NewRefCounter(&checker{}),
		defaultStorageOpts: storageOptions,
	}
	return d, nil
}

// String returns the string representation of a driver. This should match
// the name the graph driver has been registered with.
func (d *Driver) String() string {
	return "windowsfilter"
}

// Status returns the status of the driver.
func (d *Driver) Status() [][2]string {
	return [][2]string{
		{"Windows", ""},
	}
}

// Exists returns true if the given id is registered with this driver.
func (d *Driver) Exists(id string) bool {
	rID, err := d.resolveID(id)
	if err != nil {
		return false
	}
	result, err := hcsshim.LayerExists(d.info, rID)
	if err != nil {
		return false
	}
	return result
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
	if opts != nil {
		return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt)
	}
	return d.create(id, parent, "", false, nil)
}

// Create creates a new read-only layer with the given id.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
	if opts != nil {
		return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt)
	}
	return d.create(id, parent, "", true, nil)
}

func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error {
	rPId, err := d.resolveID(parent)
	if err != nil {
		return err
	}

	parentChain, err := d.getLayerChain(rPId)
	if err != nil {
		return err
	}

	var layerChain []string

	if rPId != "" {
		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
		if err != nil {
			return err
		}
		if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil {
			// This is a legitimate parent layer (not the empty "-init" layer),
			// so include it in the layer chain.
			layerChain = []string{parentPath}
		}
	}

	layerChain = append(layerChain, parentChain...)
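	// Illustrative shape of layerChain at this point (example paths only): it is
	// ordered from the immediate parent down to the base layer, e.g.
	//
	//	C:\ProgramData\docker\windowsfilter\<parent-id>
	//	C:\ProgramData\docker\windowsfilter\<base-id>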

	if readOnly {
		if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil {
			return err
		}
	} else {
		var parentPath string
		if len(layerChain) != 0 {
			parentPath = layerChain[0]
		}

		if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil {
			return err
		}

		storageOptions, err := parseStorageOpt(storageOpt)
		if err != nil {
			return fmt.Errorf("Failed to parse storage options - %s", err)
		}

		sandboxSize := d.defaultStorageOpts.size
		if storageOptions.size != 0 {
			sandboxSize = storageOptions.size
		}

		if sandboxSize != 0 {
			if err := hcsshim.ExpandSandboxSize(d.info, id, sandboxSize); err != nil {
				return err
			}
		}
	}

	if _, err := os.Lstat(d.dir(parent)); err != nil {
		if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
			logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
		}
		return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err)
	}

	if err := d.setLayerChain(id, layerChain); err != nil {
		if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil {
			logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2)
		}
		return err
	}

	return nil
}

// dir returns the absolute path to the layer.
func (d *Driver) dir(id string) string {
	return filepath.Join(d.info.HomeDir, filepath.Base(id))
}

// Remove unmounts and removes the dir information.
func (d *Driver) Remove(id string) error {
	rID, err := d.resolveID(id)
	if err != nil {
		return err
	}

	// This retry loop is due to a bug in Windows (Internal bug #9432268):
	// if GetContainers fails with ErrVmcomputeOperationInvalidState,
	// it is a transient error. Retry until it succeeds.
	var computeSystems []hcsshim.ContainerProperties
	retryCount := 0
	for {
		// Get and terminate any template VMs that are currently using the layer.
		// Note: It is unfortunate that we end up in the graphdrivers Remove() call
		// for both containers and images, but the logic for template VMs is only
		// needed for images - specifically we are looking to see if a base layer
		// is in use by a template VM as a result of having started a Hyper-V
		// container at some point.
		//
		// We have a retry loop for ErrVmcomputeOperationInvalidState and
		// ErrVmcomputeOperationAccessIsDenied as there is a race condition in
		// RS1 and RS2 builds during enumeration when a silo is going away
		// underneath it in HCS. AccessIsDenied was added to fix 30278.
		//
		// TODO: For RS3, we can remove the retries. Also consider
		// using platform APIs (if available) to get this more succinctly. Also
		// consider enhancing the Remove() interface to have context of why
		// the remove is being called - that could improve efficiency by not
		// enumerating compute systems during a remove of a container as it's
		// not required.
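		//
		// With the cap below of 500 retries spaced 10ms apart, the loop waits at
		// most roughly five seconds for the transient condition to clear.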
		computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
		if err != nil {
			if osversion.Build() >= osversion.RS3 {
				return err
			}
			if (err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied) {
				if retryCount >= 500 {
					break
				}
				retryCount++
				time.Sleep(10 * time.Millisecond)
				continue
			}
			return err
		}
		break
	}

	for _, computeSystem := range computeSystems {
		if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate {
			container, err := hcsshim.OpenContainer(computeSystem.ID)
			if err != nil {
				return err
			}
			err = container.Terminate()
			if hcsshim.IsPending(err) {
				err = container.Wait()
			} else if hcsshim.IsAlreadyStopped(err) {
				err = nil
			}

			_ = container.Close()
			if err != nil {
				return err
			}
		}
	}

	layerPath := filepath.Join(d.info.HomeDir, rID)
	tmpID := fmt.Sprintf("%s-removing", rID)
	tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
		if !os.IsPermission(err) {
			return err
		}
		// If permission denied, it's possible that the scratch is still mounted, an
		// artifact after a hard daemon crash for example. Worth a shot to try detaching it
		// before retrying the rename.
		sandbox := filepath.Join(layerPath, "sandbox.vhdx")
		if _, statErr := os.Stat(sandbox); statErr == nil {
			if detachErr := vhd.DetachVhd(sandbox); detachErr != nil {
				return errors.Wrapf(err, "failed to detach VHD: %s", detachErr)
			}
			if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) {
				return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr)
			}
		}
	}
	if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
		logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
	}

	return nil
}

// GetLayerPath gets the layer path on host
func (d *Driver) GetLayerPath(id string) (string, error) {
	return d.dir(id), nil
}

// Get returns the rootfs path for the id. This will mount the dir at its given path.
func (d *Driver) Get(id, mountLabel string) (string, error) {
	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
	var dir string

	rID, err := d.resolveID(id)
	if err != nil {
		return "", err
	}
	if count := d.ctr.Increment(rID); count > 1 {
		return d.cache[rID], nil
	}

	// Getting the layer paths must be done outside of the lock.
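	// What follows is the standard windowsfilter mount sequence: ActivateLayer
	// mounts the layer's backing storage, PrepareLayer applies the parent layer
	// chain, and GetLayerMountPath reports where the resulting filesystem is
	// exposed (typically a \\?\Volume{...}\ path for a VHD-backed scratch layer).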
	layerChain, err := d.getLayerChain(rID)
	if err != nil {
		d.ctr.Decrement(rID)
		return "", err
	}

	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
		d.ctr.Decrement(rID)
		return "", err
	}
	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
		d.ctr.Decrement(rID)
		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
		}
		return "", err
	}

	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
	if err != nil {
		d.ctr.Decrement(rID)
		if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
			logrus.Warnf("Failed to Unprepare %s: %s", id, err)
		}
		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
			logrus.Warnf("Failed to Deactivate %s: %s", id, err2)
		}
		return "", err
	}
	d.cacheMu.Lock()
	d.cache[rID] = mountPath
	d.cacheMu.Unlock()

	// If the layer has a mount path, use that. Otherwise, use the
	// folder path.
	if mountPath != "" {
		dir = mountPath
	} else {
		dir = d.dir(id)
	}

	return dir, nil
}

// Put releases a reference on the layer. Once the last reference is released,
// the layer is unprepared and deactivated (unmounted).
func (d *Driver) Put(id string) error {
	logrus.Debugf("WindowsGraphDriver Put() id %s", id)

	rID, err := d.resolveID(id)
	if err != nil {
		return err
	}
	if count := d.ctr.Decrement(rID); count > 0 {
		return nil
	}
	d.cacheMu.Lock()
	_, exists := d.cache[rID]
	delete(d.cache, rID)
	d.cacheMu.Unlock()

	// If the cache was not populated, then the layer was left unprepared and deactivated
	if !exists {
		return nil
	}

	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
		return err
	}
	return hcsshim.DeactivateLayer(d.info, rID)
}

// Cleanup ensures the information the driver stores is properly removed.
// We use this opportunity to clean up any -removing folders which may still
// be left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
	items, err := os.ReadDir(d.info.HomeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	// Note we don't return an error below - it's possible the files
	// are locked. However, next time around after the daemon exits,
	// we likely will be able to cleanup successfully. Instead we log
	// warnings if there are errors.
	for _, item := range items {
		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
			if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
				logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
			} else {
				logrus.Infof("Cleaned up %s", item.Name())
			}
		}
	}

	return nil
}
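
// exampleMountForUse is an illustrative sketch only and is not called by the
// daemon: it demonstrates the Get/Put contract. Every successful Get must be
// balanced by a Put so the reference count and the HCS activate/prepare state
// stay consistent. The layer id is a hypothetical example value.
func exampleMountForUse(d *Driver) error {
	dir, err := d.Get("0123456789abcdef", "")
	if err != nil {
		return err
	}
	// ... use dir as the container's root filesystem ...
	_ = dir
	return d.Put("0123456789abcdef")
}
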
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function.
func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
	rID, err := d.resolveID(id)
	if err != nil {
		return
	}

	layerChain, err := d.getLayerChain(rID)
	if err != nil {
		return
	}

	// The layer is expected to be prepared (mounted) on entry; unprepare it so
	// it can be exported, then re-prepare it once the export completes.
	if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
		return nil, err
	}
	prepare := func() {
		if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
			logrus.Warnf("Failed to re-PrepareLayer %s: %s", rID, err)
		}
	}

	arch, err := d.exportLayer(rID, layerChain)
	if err != nil {
		prepare()
		return
	}
	return ioutils.NewReadCloserWrapper(arch, func() error {
		err := arch.Close()
		prepare()
		return err
	}), nil
}

// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should not be mounted when calling this function.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
	rID, err := d.resolveID(id)
	if err != nil {
		return nil, err
	}
	parentChain, err := d.getLayerChain(rID)
	if err != nil {
		return nil, err
	}

	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
		return nil, err
	}
	defer func() {
		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
			logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
		}
	}()

	var changes []archive.Change
	err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
		if err != nil {
			return err
		}
		defer r.Close()

		for {
			name, _, fileInfo, err := r.Next()
			if err == io.EOF {
				return nil
			}
			if err != nil {
				return err
			}
			name = filepath.ToSlash(name)
			if fileInfo == nil {
				changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
			} else {
				// Currently there is no way to distinguish an add from a modify.
				changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
			}
		}
	})
	if err != nil {
		return nil, err
	}

	return changes, nil
}

// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The layer should not be mounted when calling this function.
func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
	var layerChain []string
	if parent != "" {
		rPId, err := d.resolveID(parent)
		if err != nil {
			return 0, err
		}
		parentChain, err := d.getLayerChain(rPId)
		if err != nil {
			return 0, err
		}
		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
		if err != nil {
			return 0, err
		}
		layerChain = append(layerChain, parentPath)
		layerChain = append(layerChain, parentChain...)
	}

	size, err := d.importLayer(id, diff, layerChain)
	if err != nil {
		return 0, err
	}

	if err = d.setLayerChain(id, layerChain); err != nil {
		return 0, err
	}

	return size, nil
}
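
// exampleCopyLayer is an illustrative sketch only and is not called by the
// daemon: it shows the Diff/ApplyDiff round trip at the driver level. The ids
// are hypothetical; the real daemon drives these calls through the layer store.
func exampleCopyLayer(d *Driver, srcID, srcParent, dstID, dstParent string) (int64, error) {
	rc, err := d.Diff(srcID, srcParent)
	if err != nil {
		return 0, err
	}
	defer rc.Close()
	// ApplyDiff streams the tar into dstID and records its parent chain in
	// layerchain.json via setLayerChain.
	return d.ApplyDiff(dstID, dstParent, rc)
}
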
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
	rPId, err := d.resolveID(parent)
	if err != nil {
		return
	}

	changes, err := d.Changes(id, rPId)
	if err != nil {
		return
	}

	layerFs, err := d.Get(id, "")
	if err != nil {
		return
	}
	defer d.Put(id)

	return archive.ChangesSize(layerFs, changes), nil
}

// GetMetadata returns custom driver information.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
	m := make(map[string]string)
	m["dir"] = d.dir(id)
	return m, nil
}

func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
	t := tar.NewWriter(w)
	for {
		name, size, fileInfo, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if fileInfo == nil {
			// Write a whiteout file.
			hdr := &tar.Header{
				Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))),
			}
			err := t.WriteHeader(hdr)
			if err != nil {
				return err
			}
		} else {
			err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
			if err != nil {
				return err
			}
		}
	}
	return t.Close()
}

// exportLayer generates an archive from a layer based on the given ID.
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
	archive, w := io.Pipe()
	go func() {
		err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
			r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths)
			if err != nil {
				return err
			}

			err = writeTarFromLayer(r, w)
			cerr := r.Close()
			if err == nil {
				err = cerr
			}
			return err
		})
		w.CloseWithError(err)
	}()

	return archive, nil
}
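
// exampleWhiteoutName is an illustrative sketch only: it mirrors the naming
// rule writeTarFromLayer applies when a file has been deleted in a layer. A
// path such as "Files/dir/removed.txt" is exported as
// "Files/dir/.wh.removed.txt" (archive.WhiteoutPrefix is ".wh.").
func exampleWhiteoutName(name string) string {
	return filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name)))
}
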
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
	var bcdBackup *os.File
	var bcdBackupWriter *winio.BackupFileWriter
	if backupPath, ok := mutatedFiles[hdr.Name]; ok {
		bcdBackup, err = os.Create(filepath.Join(root, backupPath))
		if err != nil {
			return nil, err
		}
		defer func() {
			cerr := bcdBackup.Close()
			if err == nil {
				err = cerr
			}
		}()

		bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
		defer func() {
			cerr := bcdBackupWriter.Close()
			if err == nil {
				err = cerr
			}
		}()

		buf.Reset(io.MultiWriter(w, bcdBackupWriter))
	} else {
		buf.Reset(w)
	}

	defer func() {
		ferr := buf.Flush()
		if err == nil {
			err = ferr
		}
	}()

	return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}

func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
	t := tar.NewReader(r)
	hdr, err := t.Next()
	totalSize := int64(0)
	buf := bufio.NewWriter(nil)
	for err == nil {
		base := path.Base(hdr.Name)
		if strings.HasPrefix(base, archive.WhiteoutPrefix) {
			name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
			err = w.Remove(filepath.FromSlash(name))
			if err != nil {
				return 0, err
			}
			hdr, err = t.Next()
		} else if hdr.Typeflag == tar.TypeLink {
			err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
			if err != nil {
				return 0, err
			}
			hdr, err = t.Next()
		} else {
			var (
				name     string
				size     int64
				fileInfo *winio.FileBasicInfo
			)
			name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
			if err != nil {
				return 0, err
			}
			err = w.Add(filepath.FromSlash(name), fileInfo)
			if err != nil {
				return 0, err
			}
			hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
			totalSize += size
		}
	}
	if err != io.EOF {
		return 0, err
	}
	return totalSize, nil
}

// importLayer adds a new layer to the tag and graph store based on the given data.
func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
	if !noreexec {
		cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
		output := bytes.NewBuffer(nil)
		cmd.Stdin = layerData
		cmd.Stdout = output
		cmd.Stderr = output

		if err = cmd.Start(); err != nil {
			return
		}

		if err = cmd.Wait(); err != nil {
			return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
		}

		return strconv.ParseInt(output.String(), 10, 64)
	}
	return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
}

// writeLayerReexec is the re-exec entry point for writing a layer from a tar file.
func writeLayerReexec() {
	size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Fprint(os.Stdout, size)
}
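
// The re-exec protocol used by importLayer and writeLayerReexec is,
// illustratively:
//
//	argv:   docker-windows-write-layer <home> <layer-id> [<parent-layer-path> ...]
//	stdin:  the layer tar stream
//	stdout: the number of bytes written, parsed by importLayer with strconv.ParseInt
//
// (The placeholders above are illustrative; the daemon builds the command via
// reexec.Command.)
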

// writeLayer writes a layer from a tar file.
func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (size int64, retErr error) {
	err := winio.EnableProcessPrivileges([]string{winio.SeSecurityPrivilege, winio.SeBackupPrivilege, winio.SeRestorePrivilege})
	if err != nil {
		return 0, err
	}
	if noreexec {
		defer func() {
			if err := winio.DisableProcessPrivileges([]string{winio.SeSecurityPrivilege, winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
				// This should never happen, but just in case when running in debugging mode.
				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
				panic("Failed to disable process privileges while in non re-exec mode")
			}
		}()
	}

	info := hcsshim.DriverInfo{
		Flavour: filterDriver,
		HomeDir: home,
	}

	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
	if err != nil {
		return 0, err
	}

	defer func() {
		if err := w.Close(); err != nil {
			// This error should not be discarded as a failure here
			// could result in an invalid layer on disk
			if retErr == nil {
				retErr = err
			}
		}
	}()

	return writeLayerFromTar(layerData, w, filepath.Join(home, id))
}

// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
	content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
	if os.IsNotExist(err) {
		return id, nil
	} else if err != nil {
		return "", err
	}
	return string(content), nil
}

// setID stores the layerId on disk.
func (d *Driver) setID(id, altID string) error {
	return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}

// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
	jPath := filepath.Join(d.dir(id), "layerchain.json")
	content, err := os.ReadFile(jPath)
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
	}

	var layerChain []string
	err = json.Unmarshal(content, &layerChain)
	if err != nil {
		return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
	}

	return layerChain, nil
}

// setLayerChain stores the layer chain information on disk.
func (d *Driver) setLayerChain(id string, chain []string) error {
	content, err := json.Marshal(&chain)
	if err != nil {
		return fmt.Errorf("Failed to marshal layerchain json - %s", err)
	}

	jPath := filepath.Join(d.dir(id), "layerchain.json")
	err = os.WriteFile(jPath, content, 0600)
	if err != nil {
		return fmt.Errorf("Unable to write layerchain file - %s", err)
	}

	return nil
}

type fileGetCloserWithBackupPrivileges struct {
	path string
}

func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
	if backupPath, ok := mutatedFiles[filename]; ok {
		return os.Open(filepath.Join(fg.path, backupPath))
	}

	var f *os.File
	// Open the file while holding the Windows backup privilege. This ensures that the
	// file can be opened even if the caller does not actually have access to it according
	// to the security descriptor. Also use sequential file access to avoid depleting the
	// standby list - Microsoft VSO Bug Tracker #9900466
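	//
	// Note that winio.RunWithPrivilege enables SeBackupPrivilege only for the
	// duration of the callback (on a locked OS thread), so the elevated access
	// does not leak to the rest of the process.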
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
		p, err := windows.UTF16FromString(path)
		if err != nil {
			return err
		}
		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
		h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
		if err != nil {
			return &os.PathError{Op: "open", Path: path, Err: err}
		}
		f = os.NewFile(uintptr(h), path)
		return nil
	})
	return f, err
}

func (fg *fileGetCloserWithBackupPrivileges) Close() error {
	return nil
}

// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
	id, err := d.resolveID(id)
	if err != nil {
		return nil, err
	}

	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
}

func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
	options := storageOptions{}

	// Read size to change the block device size per container.
	for key, val := range storageOpt {
		key := strings.ToLower(key)
		switch key {
		case "size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return nil, err
			}
			options.size = uint64(size)
		}
	}
	return &options, nil
}
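
// exampleParseSize is an illustrative sketch only: it shows how a user-supplied
// size option is converted to bytes by parseStorageOpt. units.RAMInBytes uses
// binary multipliers, so "20GB" here yields 20 * 1024 * 1024 * 1024 bytes.
func exampleParseSize() (uint64, error) {
	opts, err := parseStorageOpt(map[string]string{"size": "20GB"})
	if err != nil {
		return 0, err
	}
	return opts.size, nil
}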