// +build windows

// Maintainer:  jhowardmsft
// Locale:      en-gb
// About:       Graph-driver for Linux Containers On Windows (LCOW)
//
// This graphdriver runs in two modes; it is yet to be determined which one
// will be the shipping mode. In global mode, a single utility VM is used for
// all service VM tool operations. This isn't safe security-wise, as sandboxes
// from multiple containers, containing untrusted data, are attached to it.
// That may be acceptable for client devops scenarios. In safe mode, a unique
// utility VM is instantiated for each container's service VM tool operations.
// The downside of safe mode is that operations are slower, as a new service
// utility VM has to be started and torn down when needed.
//
// Options:
//
// The following options are read by the graphdriver itself:
//
//   * lcow.globalmode - Enables global service VM mode
//        -- Possible values:     true/false
//        -- Default if omitted:  false
//
//   * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container
//        -- Possible values:     >= default sandbox size (opengcs defined, currently 20)
//        -- Default if omitted:  20
//
// The following options are read by opengcs:
//
//   * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair
//        -- Possible values:     Any local path that is not a mapped drive
//        -- Default if omitted:  %ProgramFiles%\Linux Containers
//
//   * lcow.kernel - Specifies a custom kernel file located in the `lcow.kirdpath` path
//        -- Possible values:     Any valid filename
//        -- Default if omitted:  bootx64.efi
//
//   * lcow.initrd - Specifies a custom initrd file located in the `lcow.kirdpath` path
//        -- Possible values:     Any valid filename
//        -- Default if omitted:  initrd.img
//
//   * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode
//        -- Possible values:     Any valid linux kernel boot options
//        -- Default if omitted:  <nil>
//
//   * lcow.vhdx - Specifies a custom vhdx file to boot (instead of a kernel+initrd)
//        -- Possible values:     Any valid filename
//        -- Default if omitted:  uvm.vhdx under `lcow.kirdpath`
//
//   * lcow.timeout - Specifies a timeout for utility VM operations in seconds
//        -- Possible values:     >=0
//        -- Default if omitted:  300
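//
// Example (illustrative only; the values are hypothetical): these options
// reach the driver and opengcs as "key=value" strings supplied through the
// daemon's --storage-opt flags, e.g.:
//
//   dockerd --storage-opt lcow.globalmode=false ^
//           --storage-opt lcow.kirdpath="C:\Program Files\Linux Containers" ^
//           --storage-opt lcow.timeout=300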

// TODO: Grab logs from the SVM on termination or error.

package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/hcsshim/ext4/tar2ext4"
	"github.com/Microsoft/opengcs/client"
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// noreexec controls reexec functionality. Off by default, on for debugging purposes.
var noreexec = false

// init registers this driver with the graphdriver registry. The driver is
// initialised through the InitDriver function, implemented in this file.
func init() {
	graphdriver.Register("lcow", InitDriver)
	// DOCKER_LCOW_NOREEXEC allows for inline processing which makes
	// debugging issues in the re-exec codepath significantly easier.
	if os.Getenv("DOCKER_LCOW_NOREEXEC") != "" {
		logrus.Warnf("LCOW Graphdriver is set to not re-exec. This is intended for debugging purposes only.")
		noreexec = true
	} else {
		reexec.Register("docker-lcow-tar2ext4", tar2ext4Reexec)
	}
}

const (
	// sandboxFilename is the name of the file containing a layer's sandbox (read-write layer).
	sandboxFilename = "sandbox.vhdx"

	// scratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory.
	scratchFilename = "scratch.vhdx"

	// layerFilename is the name of the file containing a layer's read-only contents.
	// Note this really is VHD format, not VHDX.
	layerFilename = "layer.vhd"

	// toolsScratchPath is a location in a service utility VM that the tools can use as a
	// scratch space to avoid running out of memory.
	toolsScratchPath = "/tmp/scratch"

	// svmGlobalID is the ID used in the serviceVMs map for the global service VM when running in "global" mode.
	svmGlobalID = "_lcow_global_svm_"

	// cacheDirectory is the sub-folder under the driver's data-root used to cache blank sandbox and scratch VHDs.
	cacheDirectory = "cache"

	// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs.
	scratchDirectory = "scratch"

	// errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending.
	errOperationPending syscall.Errno = 0xc0370103
)

// Driver represents an LCOW graph driver.
type Driver struct {
	dataRoot           string     // Root path on the host where we are storing everything.
	cachedSandboxFile  string     // Location of the local default-sized cached sandbox.
	cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
	cachedScratchFile  string     // Location of the local cached empty scratch space.
	cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
	options            []string   // Graphdriver options we are initialised with.
	globalMode         bool       // Indicates if running in an unsafe/global service VM mode.

	// NOTE: It is OK to use a cache here because Windows does not support
	// restoring containers when the daemon dies.
	serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running.
}

// layerDetails is the structure returned by the helper function getLayerDetails
// for getting information about a layer folder.
type layerDetails struct {
	filename  string // \path\to\sandbox.vhdx or \path\to\layer.vhd
	size      int64  // size of the above file
	isSandbox bool   // true if sandbox.vhdx
}

// deletefiles is a helper function for initialisation where we delete any
// left-over scratch files in case we were previously forcibly terminated.
func deletefiles(path string, f os.FileInfo, err error) error {
	if err != nil {
		// Propagate walk errors rather than dereferencing a nil FileInfo.
		return err
	}
	if strings.HasSuffix(f.Name(), ".vhdx") {
		logrus.Warnf("lcowdriver: init: deleting stale scratch file %s", path)
		return os.Remove(path)
	}
	return nil
}

// InitDriver returns a new LCOW storage driver.
func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphdriver.Driver, error) {
	title := "lcowdriver: init:"

	cd := filepath.Join(dataRoot, cacheDirectory)
	sd := filepath.Join(dataRoot, scratchDirectory)

	d := &Driver{
		dataRoot:          dataRoot,
		options:           options,
		cachedSandboxFile: filepath.Join(cd, sandboxFilename),
		cachedScratchFile: filepath.Join(cd, scratchFilename),
		serviceVms: &serviceVMMap{
			svms: make(map[string]*serviceVMMapItem),
		},
		globalMode: false,
	}

	// Look for relevant options.
	for _, v := range options {
		opt := strings.SplitN(v, "=", 2)
		if len(opt) == 2 {
			switch strings.ToLower(opt[0]) {
			case "lcow.globalmode":
				var err error
				d.globalMode, err = strconv.ParseBool(opt[1])
				if err != nil {
					return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title)
				}
			}
		}
	}

	// Make sure the dataRoot directory is created
	if err := idtools.MkdirAllAndChown(dataRoot, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
		return nil, fmt.Errorf("%s failed to create '%s': %v", title, dataRoot, err)
	}

	// Make sure the cache directory is created under dataRoot
	if err := idtools.MkdirAllAndChown(cd, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
		return nil, fmt.Errorf("%s failed to create '%s': %v", title, cd, err)
	}

	// Make sure the scratch directory is created under dataRoot
	if err := idtools.MkdirAllAndChown(sd, 0700, idtools.Identity{UID: 0, GID: 0}); err != nil {
		return nil, fmt.Errorf("%s failed to create '%s': %v", title, sd, err)
	}

	// Delete any items in the scratch directory
	filepath.Walk(sd, deletefiles)

	logrus.Infof("%s dataRoot: %s globalMode: %t", title, dataRoot, d.globalMode)

	return d, nil
}

func (d *Driver) getVMID(id string) string {
	if d.globalMode {
		return svmGlobalID
	}
	return id
}

// remapLongToShortContainerPath maps the long container path for a
// SCSI-attached disk to the short container path where it's actually mounted.
func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64, svmName string) string {
	shortContainerPath := longContainerPath
	if shortContainerPath != "" && shortContainerPath != toolsScratchPath {
		shortContainerPath = fmt.Sprintf("/tmp/d%d", attachCounter)
		logrus.Debugf("lcowdriver: UVM %s: remapping %s --> %s", svmName, longContainerPath, shortContainerPath)
	}
	return shortContainerPath
}
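
// Illustrative sketch of the remapping above (the values are hypothetical and
// the calls are not part of the driver): a long per-layer path collapses to a
// short /tmp/dN path keyed by the attach counter, while the scratch path is
// passed through untouched.
//
//	remapLongToShortContainerPath("/tmp/e3b0c44298fc1c14", 5, "svm_1") // "/tmp/d5"
//	remapLongToShortContainerPath(toolsScratchPath, 5, "svm_1")        // "/tmp/scratch"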

// startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
// It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
// representing the VM.
func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
	// Use the global ID if in global mode
	id = d.getVMID(id)

	title := "lcowdriver: startServiceVMIfNotRunning " + id

	// Attempt to add ID to the service vm map
	logrus.Debugf("%s: adding entry to service vm map", title)
	svm, exists, err := d.serviceVms.add(id)
	if err != nil && err == errVMisTerminating {
		// VM is in the process of terminating. Wait until it's done and then try again
		logrus.Debugf("%s: VM with current ID still in the process of terminating", title)
		if err := svm.getStopError(); err != nil {
			logrus.Debugf("%s: VM did not stop successfully: %s", title, err)
			return nil, err
		}
		return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
	} else if err != nil {
		logrus.Debugf("%s: failed to add service vm to map: %s", title, err)
		return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
	}

	if exists {
		// Service VM is already up and running. In this case, just hot add the vhds.
		// Note that hotAddVHDs will remap long to short container paths, so no need
		// for us to do that here.
		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
			return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err)
		}
		return svm, nil
	}

	// We are the first service for this id, so we need to start it
	logrus.Debugf("%s: service vm doesn't exist. Now starting it up", title)

	defer func() {
		// Signal that start has finished, passing in the error if any.
		svm.signalStartFinished(err)
		if err != nil {
			// We added a ref to the VM; since we failed, we should delete the ref.
			d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false)
		}
	}()

	// Generate a default configuration
	if err := svm.config.GenerateDefault(d.options); err != nil {
		return nil, fmt.Errorf("%s: failed to generate default gogcs configuration for global svm (%s): %s", title, context, err)
	}

	// For the name, we deliberately add a suffix in safe mode to ensure that it doesn't
	// clash with another utility VM which may be running for the container itself.
	// This also makes it easier to correlate through Get-ComputeProcess.
	if id == svmGlobalID {
		svm.config.Name = svmGlobalID
	} else {
		svm.config.Name = fmt.Sprintf("%s_svm", id)
	}

	// Ensure we take the cached scratch mutex around the check to ensure the file is complete
	// and not in the process of being created by another thread.
	scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))

	logrus.Debugf("%s: locking cachedScratchMutex", title)
	d.cachedScratchMutex.Lock()
	if _, err := os.Stat(d.cachedScratchFile); err == nil {
		// Make a copy of cached scratch to the scratch directory
		logrus.Debugf("%s: (%s) cloning cached scratch for mvd", title, context)
		if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil {
			logrus.Debugf("%s: releasing cachedScratchMutex on err: %s", title, err)
			d.cachedScratchMutex.Unlock()
			return nil, err
		}

		// Add the cached clone as a mapped virtual disk
		logrus.Debugf("%s: (%s) adding cloned scratch as mvd", title, context)
		mvd := hcsshim.MappedVirtualDisk{
			HostPath:          scratchTargetFile,
			ContainerPath:     toolsScratchPath,
			CreateInUtilityVM: true,
		}
		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
		svm.scratchAttached = true
	}

	logrus.Debugf("%s: releasing cachedScratchMutex", title)
	d.cachedScratchMutex.Unlock()

	// Add mapped virtual disks. First those that are already in the configuration. Generally,
	// the only one that will be here is the service VM's scratch. The exception is when invoked
	// via the graphdriver's DiffGetter implementation.
	for i, mvd := range svm.config.MappedVirtualDisks {
		svm.attachCounter++
		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}

		// No-op for the service VM's scratch disk. Only applicable in the DiffGetter interface invocation.
		svm.config.MappedVirtualDisks[i].ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
	}

	// Then add the remaining ones, appending them to the startup configuration.
	for _, mvd := range mvdToAdd {
		svm.attachCounter++
		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
		mvd.ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
	}

	// Start it.
	logrus.Debugf("%s: (%s) starting %s", title, context, svm.config.Name)
	if err := svm.config.StartUtilityVM(); err != nil {
		return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
	}

	// defer function to terminate the VM if the next steps fail
	defer func() {
		if err != nil {
			waitTerminate(svm, fmt.Sprintf("%s: (%s)", title, context))
		}
	}()

	// Now that we have a running service VM, we can create the cached scratch file if it doesn't exist.
	logrus.Debugf("%s: locking cachedScratchMutex", title)
	d.cachedScratchMutex.Lock()
	if _, err := os.Stat(d.cachedScratchFile); err != nil {
		logrus.Debugf("%s: (%s) creating an SVM scratch", title, context)

		// Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup,
		// but we're still in that process right now.
		if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil {
			logrus.Debugf("%s: (%s) releasing cachedScratchMutex on error path", title, context)
			d.cachedScratchMutex.Unlock()
			logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err)
			return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
		}
	}
	logrus.Debugf("%s: (%s) releasing cachedScratchMutex", title, context)
	d.cachedScratchMutex.Unlock()

	// Hot-add the scratch-space if not already attached
	if !svm.scratchAttached {
		logrus.Debugf("%s: (%s) hot-adding scratch %s", title, context, scratchTargetFile)
		if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{
			HostPath:          scratchTargetFile,
			ContainerPath:     toolsScratchPath,
			CreateInUtilityVM: true,
		}); err != nil {
			logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err)
			return nil, fmt.Errorf("failed to hot-add %s: %s", scratchTargetFile, err)
		}
		svm.scratchAttached = true
		// No need to ref-count here as it will be done via the hotAddVHDsAtStart() call above.
	}

	logrus.Debugf("%s: (%s) success", title, context)
	return svm, nil
}

// terminateServiceVM terminates a service utility VM if it's running and is
// no longer being used by any goroutine, but does nothing when in global mode
// as its lifetime is tied to that of the daemon. If the force flag is set,
// the VM will be killed regardless of the ref count or whether it's global.
func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) {
	// We don't do anything in global mode unless the force flag has been passed, which
	// is only the case for cleanup at driver termination.
	if d.globalMode && !force {
		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
		return nil
	}

	id = d.getVMID(id)

	var svm *serviceVM
	var lastRef bool
	if !force {
		// In the non-force case, we ref count
		svm, lastRef, err = d.serviceVms.decrementRefCount(id)
	} else {
		// In the force case, we ignore the ref count and just set it to 0
		svm, err = d.serviceVms.setRefCountZero(id)
		lastRef = true
	}

	if err == errVMUnknown {
		return nil
	} else if err == errVMisTerminating {
		return svm.getStopError()
	} else if !lastRef {
		return nil
	}

	// We run the deletion of the scratch as a deferred function to at least attempt
	// clean-up in case of errors.
	defer func() {
		if svm.scratchAttached {
			scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
			if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
				err = errRemove
			}
		}

		// This function shouldn't actually return an error unless there is a bug
		if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
			logrus.Warnf("failed to delete service vm from svm map %s (%s): %s", id, context, errDelete)
		}

		// Signal that this VM has stopped
		svm.signalStopFinished(err)
	}()

	// It's possible that the service VM failed to start and now we are trying to terminate it.
	// In this case, we will relay the error to the goroutines waiting for this vm to stop.
	if err := svm.getStartError(); err != nil {
		logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
		return err
	}

	if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
		return err
	}

	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
	return nil
}

func waitTerminate(svm *serviceVM, context string) error {
	if svm.config == nil {
		return fmt.Errorf("lcowdriver: waitTerminate: nil utility VM. %s", context)
	}

	logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
	if err := svm.config.Uvm.Terminate(); err != nil {
		// We might get operation still pending from the HCS. In that case, we shouldn't return
		// an error since we call wait right after.
		underlyingError := err
		if conterr, ok := err.(*hcsshim.ContainerError); ok {
			underlyingError = conterr.Err
		}

		if syscallErr, ok := underlyingError.(syscall.Errno); ok {
			underlyingError = syscallErr
		}

		if underlyingError != errOperationPending {
			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
		}
		logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
	}

	logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
	if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
		return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
	}
	return nil
}

// String returns the string representation of a driver. This should match
// the name the graph driver has been registered with.
func (d *Driver) String() string {
	return "lcow"
}

// Status returns the status of the driver.
func (d *Driver) Status() [][2]string {
	return [][2]string{
		{"LCOW", ""},
		// TODO: Add some more info here - mode, home, ...
	}
}

// Exists returns true if the given id is registered with this driver.
func (d *Driver) Exists(id string) bool {
	_, err := os.Lstat(d.dir(id))
	logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil)
	return err == nil
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system. That equates to creating a sandbox.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
	title := fmt.Sprintf("lcowdriver: createreadwrite: id %s", id)
	logrus.Debugf(title)

	// First we need to create the folder
	if err := d.Create(id, parent, opts); err != nil {
		return err
	}

	// Look for an explicit sandbox size option.
	sandboxSize := uint64(client.DefaultVhdxSizeGB)
	for k, v := range opts.StorageOpt {
		switch strings.ToLower(k) {
		case "lcow.sandboxsize":
			var err error
			sandboxSize, err = strconv.ParseUint(v, 10, 32)
			if err != nil {
				return fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v)
			}
			if sandboxSize < client.DefaultVhdxSizeGB {
				return fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB)
			}
		}
	}

	// Massive perf optimisation here. If we know that the RW layer is the default size,
	// and that the cached sandbox already exists, and we are running in safe mode, we
	// can just do a simple copy into the layer's sandbox file without needing to start a
	// unique service VM. For a global service VM, it doesn't really matter. Of course,
	// this is only the case where the sandbox is the default size.
	//
	// Make sure we have the sandbox mutex taken while we are examining it.
	if sandboxSize == client.DefaultVhdxSizeGB {
		logrus.Debugf("%s: locking cachedSandboxMutex", title)
		d.cachedSandboxMutex.Lock()
		_, err := os.Stat(d.cachedSandboxFile)
		logrus.Debugf("%s: releasing cachedSandboxMutex", title)
		d.cachedSandboxMutex.Unlock()
		if err == nil {
			logrus.Debugf("%s: using cached sandbox to populate", title)
			if err := client.CopyFile(d.cachedSandboxFile, filepath.Join(d.dir(id), sandboxFilename), true); err != nil {
				return err
			}
			return nil
		}
	}

	logrus.Debugf("%s: creating SVM to create sandbox", title)
	svm, err := d.startServiceVMIfNotRunning(id, nil, "createreadwrite")
	if err != nil {
		return err
	}
	defer d.terminateServiceVM(id, "createreadwrite", false)

	// So the sandbox needs creating. If default size, ensure we are the only thread populating the cache.
	// Non-default sizes we don't store, just create them one-off, so no need to lock the cachedSandboxMutex.
	if sandboxSize == client.DefaultVhdxSizeGB {
		logrus.Debugf("%s: locking cachedSandboxMutex for creation", title)
		d.cachedSandboxMutex.Lock()
		defer func() {
			logrus.Debugf("%s: releasing cachedSandboxMutex for creation", title)
			d.cachedSandboxMutex.Unlock()
		}()
	}

	// Make sure we don't write to our local cached copy if this is for a non-default size request.
	targetCacheFile := d.cachedSandboxFile
	if sandboxSize != client.DefaultVhdxSizeGB {
		targetCacheFile = ""
	}

	// Create the ext4 vhdx
	logrus.Debugf("%s: creating sandbox ext4 vhdx", title)
	if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
		logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err)
		return err
	}
	return nil
}

// Create creates the folder for the layer with the given id, and
// adds it to the layer chain.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
	logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent)

	parentChain, err := d.getLayerChain(parent)
	if err != nil {
		return err
	}

	var layerChain []string
	if parent != "" {
		if !d.Exists(parent) {
			return fmt.Errorf("lcowdriver: cannot create layer folder with missing parent %s", parent)
		}
		layerChain = []string{d.dir(parent)}
	}
	layerChain = append(layerChain, parentChain...)

	// Make sure layers are created with the correct ACL so that VMs can access them.
	layerPath := d.dir(id)
	logrus.Debugf("lcowdriver: create: id %s: creating %s", id, layerPath)
	if err := system.MkdirAllWithACL(layerPath, 0755, system.SddlNtvmAdministratorsLocalSystem); err != nil {
		return err
	}

	if err := d.setLayerChain(id, layerChain); err != nil {
		if err2 := os.RemoveAll(layerPath); err2 != nil {
			logrus.Warnf("failed to remove layer %s: %s", layerPath, err2)
		}
		return err
	}
	logrus.Debugf("lcowdriver: create: id %s: success", id)

	return nil
}

// Remove unmounts and removes the dir information.
func (d *Driver) Remove(id string) error {
	logrus.Debugf("lcowdriver: remove: id %s", id)
	tmpID := fmt.Sprintf("%s-removing", id)
	tmpLayerPath := d.dir(tmpID)
	layerPath := d.dir(id)

	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)

	// Unmount all the layers
	err := d.Put(id)
	if err != nil {
		logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
		return err
	}

	// For the non-global case, just kill the VM.
	if !d.globalMode {
		if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
			return err
		}
	}

	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
		return err
	}

	if err := os.RemoveAll(tmpLayerPath); err != nil {
		return err
	}

	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath)
	return nil
}

// Get returns the rootfs path for the id. It is reference counted and
// effectively can be thought of as "mount the layer into the utility
// VM if it isn't already". The contract from the caller of this is that
// all Gets and Puts are matched. It -should- be the case that on cleanup,
// nothing is mounted.
//
// For optimisation, we don't actually mount the filesystem (which in our
// case means [hot-]adding it to a service VM); instead we track that and
// defer the actual adding to the point we need to access it.
func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
	title := fmt.Sprintf("lcowdriver: get: %s", id)
	logrus.Debugf(title)

	// Generate the mounts needed for the deferred operation.
	disks, err := d.getAllMounts(id)
	if err != nil {
		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
		return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
	}

	logrus.Debugf("%s: got layer mounts: %+v", title, disks)
	return &lcowfs{
		root:        unionMountName(disks),
		d:           d,
		mappedDisks: disks,
		vmID:        d.getVMID(id),
	}, nil
}

// Put does the reverse of Get. If there are no more references to
// the layer, it unmounts it from the utility VM.
func (d *Driver) Put(id string) error {
	title := fmt.Sprintf("lcowdriver: put: %s", id)

	// Get the service VM that we need to remove from
	svm, err := d.serviceVms.get(d.getVMID(id))
	if err == errVMUnknown {
		return nil
	} else if err == errVMisTerminating {
		return svm.getStopError()
	}

	// Generate the mounts that Get() might have mounted
	disks, err := d.getAllMounts(id)
	if err != nil {
		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
		return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
	}

	// Now, we want to perform the unmounts, hot-remove and stop the service vm.
	// We want to go through all the steps even if we have an error to clean up properly
	err = svm.deleteUnionMount(unionMountName(disks), disks...)
	if err != nil {
		logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
	}

	err1 := svm.hotRemoveVHDs(disks...)
	if err1 != nil {
		logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err1)
		if err == nil {
			err = err1
		}
	}

	err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
	if err1 != nil {
		logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
		if err == nil {
			err = err1
		}
	}
	logrus.Debugf("Put succeeded on id %s", id)
	return err
}
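
// Illustrative sketch of the Get/Put contract described above (not part of
// the driver; the id is hypothetical): every successful Get must be balanced
// by a Put so the layer's VHDs are hot-removed and the service VM ref count
// drops back to zero.
//
//	fs, err := d.Get("e3b0c44298fc1c14", "")
//	if err == nil {
//		// ... use fs ...
//		_ = d.Put("e3b0c44298fc1c14")
//	}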

// Cleanup ensures the information the driver stores is properly removed.
// We use this opportunity to clean up any -removing folders which may be
// left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
	title := "lcowdriver: cleanup"

	items, err := ioutil.ReadDir(d.dataRoot)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	// Note we don't return an error below - it's possible the files
	// are locked. However, next time around after the daemon exits,
	// we likely will be able to clean up successfully. Instead we log
	// warnings if there are errors.
	for _, item := range items {
		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
			if err := os.RemoveAll(filepath.Join(d.dataRoot, item.Name())); err != nil {
				logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err)
			} else {
				logrus.Infof("%s cleaned up %s", title, item.Name())
			}
		}
	}

	// Cleanup any service VMs we have running, along with their scratch spaces.
	// We don't take the lock for this as it's taken in terminateServiceVM.
	for k, v := range d.serviceVms.svms {
		logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
		d.terminateServiceVM(k, "cleanup", true)
	}

	return nil
}

// Diff takes a layer (and its parent layer which may be null, but
// is ignored by this implementation below) and returns a reader for
// a tarstream representing the layer's contents. The id could be
// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics
// of this function dictate that the layer is already mounted.
// However, as we do lazy mounting as a performance optimisation,
// this will likely not be the case.
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
	title := fmt.Sprintf("lcowdriver: diff: %s", id)

	// Get VHDX info
	ld, err := getLayerDetails(d.dir(id))
	if err != nil {
		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
		return nil, err
	}

	// Start the SVM with a mapped virtual disk. Note that if the SVM is
	// already running and we are in global mode, this will be
	// hot-added.
	mvd := hcsshim.MappedVirtualDisk{
		HostPath:          ld.filename,
		ContainerPath:     hostToGuest(ld.filename),
		CreateInUtilityVM: true,
		ReadOnly:          true,
	}

	logrus.Debugf("%s: starting service VM", title)
	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
	if err != nil {
		return nil, err
	}

	logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
	err = svm.getStartError()
	if err != nil {
		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
		return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
	}

	// Obtain the tar stream for it.
	// The actual container path will have been remapped to a short name, so use that.
	actualContainerPath := svm.getShortContainerPath(&mvd)
	if actualContainerPath == "" {
		return nil, fmt.Errorf("failed to get short container path for %+v in SVM %s", mvd, svm.config.Name)
	}
	logrus.Debugf("%s: %s %s, size %d, isSandbox %t", title, ld.filename, actualContainerPath, ld.size, ld.isSandbox)
	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, actualContainerPath, ld.isSandbox, ld.size)
	if err != nil {
		svm.hotRemoveVHDs(mvd)
		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
		return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
	}

	logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)

	// In safe/non-global mode, we can't tear down the service VM until things have been read.
	return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
		tarReadCloser.Close()
		svm.hotRemoveVHDs(mvd)
		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
		return nil
	}), nil
}

// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes. The layer should not be mounted when calling
// this function. Another way of describing this is that ApplyDiff writes
// to a new layer (a VHD in LCOW) the contents of a tarstream it's given.
func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
	logrus.Debugf("lcowdriver: applydiff: id %s", id)

	// Log failures here, as they're sometimes undiagnosable otherwise due to a possible panic.
	// See https://github.com/moby/moby/issues/37955 for more information.

	dest := filepath.Join(d.dataRoot, id, layerFilename)
	if !noreexec {
		cmd := reexec.Command([]string{"docker-lcow-tar2ext4", dest}...)
		stdout := bytes.NewBuffer(nil)
		stderr := bytes.NewBuffer(nil)
		cmd.Stdin = diff
		cmd.Stdout = stdout
		cmd.Stderr = stderr

		if err := cmd.Start(); err != nil {
			logrus.Warnf("lcowdriver: applydiff: id %s failed to start re-exec: %s", id, err)
			return 0, err
		}

		if err := cmd.Wait(); err != nil {
			logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err)
			return 0, fmt.Errorf("re-exec error: %v: stderr: %s", err, stderr)
		}
		return strconv.ParseInt(stdout.String(), 10, 64)
	}
	// The inline case
	size, err := tar2ext4Actual(dest, diff)
	if err != nil {
		logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err)
	}
	return size, err
}

// tar2ext4Reexec is the re-exec entry point for writing a layer from a tar file
func tar2ext4Reexec() {
	size, err := tar2ext4Actual(os.Args[1], os.Stdin)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Fprint(os.Stdout, size)
}
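
// Illustrative sketch of the re-exec protocol implemented by ApplyDiff and
// tar2ext4Reexec above (the destination path is hypothetical): the parent
// process re-invokes itself as "docker-lcow-tar2ext4" with the destination
// VHD path as os.Args[1], streams the tar over stdin, and reads the resulting
// layer size in bytes back from stdout.
//
//	cmd := reexec.Command("docker-lcow-tar2ext4", `C:\ProgramData\docker\lcow\<id>\layer.vhd`)
//	cmd.Stdin = tarStream // tar of the layer contents
//	out := &bytes.Buffer{}
//	cmd.Stdout = out // child prints the size
//	if err := cmd.Run(); err == nil {
//		size, _ := strconv.ParseInt(out.String(), 10, 64)
//		_ = size
//	}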

// tar2ext4Actual is the implementation of tar2ext4 to write a layer from a tar file.
// It can be called through re-exec (the default), or inline for debugging.
func tar2ext4Actual(dest string, diff io.Reader) (int64, error) {
	// maxDiskSize is not related to the sandbox size - this is the
	// maximum possible size a generated layer VHD can be from an EXT4
	// layout perspective.
	const maxDiskSize = 128 * 1024 * 1024 * 1024 // 128GB
	out, err := os.Create(dest)
	if err != nil {
		return 0, err
	}
	defer out.Close()
	if err := tar2ext4.Convert(
		diff,
		out,
		tar2ext4.AppendVhdFooter,
		tar2ext4.ConvertWhiteout,
		tar2ext4.MaximumDiskSize(maxDiskSize)); err != nil {
		return 0, err
	}
	fi, err := os.Stat(dest)
	if err != nil {
		return 0, err
	}
	return fi.Size(), nil
}

// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should not be mounted when calling this function.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
	logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent)
	// TODO @gupta-ak. Needs implementation with assistance from service VM
	return nil, nil
}

// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
	logrus.Debugf("lcowdriver: diffsize: id %s", id)
	// TODO @gupta-ak. Needs implementation with assistance from service VM
	return 0, nil
}

// GetMetadata returns custom driver information.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
	logrus.Debugf("lcowdriver: getmetadata: id %s", id)
	m := make(map[string]string)
	m["dir"] = d.dir(id)
	return m, nil
}

// GetLayerPath gets the layer path on host (path to VHD/VHDX)
func (d *Driver) GetLayerPath(id string) (string, error) {
	return d.dir(id), nil
}

// dir returns the absolute path to the layer.
func (d *Driver) dir(id string) string {
	return filepath.Join(d.dataRoot, filepath.Base(id))
}

// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
	jPath := filepath.Join(d.dir(id), "layerchain.json")
	logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath)
	content, err := ioutil.ReadFile(jPath)
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err)
	}

	var layerChain []string
	err = json.Unmarshal(content, &layerChain)
	if err != nil {
		return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshal layerchain file %s: %s", id, jPath, err)
	}
	return layerChain, nil
}

// setLayerChain stores the layer chain information on disk.
func (d *Driver) setLayerChain(id string, chain []string) error {
	content, err := json.Marshal(&chain)
	if err != nil {
		return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshal layerchain json: %s", id, err)
	}

	jPath := filepath.Join(d.dir(id), "layerchain.json")
	logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath)
	err = ioutil.WriteFile(jPath, content, 0600)
	if err != nil {
		return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err)
	}
	return nil
}
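
// Illustrative layerchain.json contents (the paths are hypothetical): the
// file is simply a JSON array of parent layer folders, ordered from the
// immediate parent down to the base layer, which getLayerChain unmarshals
// into a []string.
//
//	["C:\\ProgramData\\docker\\lcow\\aaa111", "C:\\ProgramData\\docker\\lcow\\bbb222"]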

// getLayerDetails is a utility for getting a file name, size and indication of
// sandbox for a VHD(x) in a folder. A read-only layer will be layer.vhd. A
// read-write layer will be sandbox.vhdx.
func getLayerDetails(folder string) (*layerDetails, error) {
	var fileInfo os.FileInfo
	ld := &layerDetails{
		isSandbox: false,
		filename:  filepath.Join(folder, layerFilename),
	}

	fileInfo, err := os.Stat(ld.filename)
	if err != nil {
		ld.filename = filepath.Join(folder, sandboxFilename)
		if fileInfo, err = os.Stat(ld.filename); err != nil {
			return nil, fmt.Errorf("failed to locate layer or sandbox in %s", folder)
		}
		ld.isSandbox = true
	}
	ld.size = fileInfo.Size()

	return ld, nil
}
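
// Illustrative sketch of the fallback above (the folder path is
// hypothetical): getLayerDetails first stats layer.vhd, and only if that's
// missing falls back to sandbox.vhdx, flipping isSandbox.
//
//	ld, err := getLayerDetails(`C:\ProgramData\docker\lcow\e3b0c44298fc1c14`)
//	if err == nil && ld.isSandbox {
//		// ld.filename is ...\sandbox.vhdx and the layer is read-write
//	}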

func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
	layerChain, err := d.getLayerChain(id)
	if err != nil {
		return nil, err
	}
	layerChain = append([]string{d.dir(id)}, layerChain...)

	logrus.Debugf("getting all layers: %v", layerChain)
	disks := make([]hcsshim.MappedVirtualDisk, len(layerChain))
	for i := range layerChain {
		ld, err := getLayerDetails(layerChain[i])
		if err != nil {
			logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
			return nil, err
		}
		disks[i].HostPath = ld.filename
		disks[i].ContainerPath = hostToGuest(ld.filename)
		disks[i].CreateInUtilityVM = true
		disks[i].ReadOnly = !ld.isSandbox
	}
	return disks, nil
}

func hostToGuest(hostpath string) string {
	// This is the "long" container path. At the point at which we are
	// calculating this, we don't know which service VM we're going to be
	// using, so we can't translate this to a short path yet, instead
	// deferring until the point at which it's added to an SVM. We don't
	// use long container paths in SVMs for SCSI disks, otherwise it can cause
	// command line operations that we invoke to fail due to being over ~4200
	// characters when there are ~47 layers involved. An example of this is
	// the mount call to create the overlay across multiple SCSI-attached disks.
	// It doesn't affect VPMem attached layers during container creation as
	// these get mapped by openGCS to /tmp/N/M where N is a container instance
	// number, and M is a layer number.
	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
}
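
// Illustrative mapping performed by hostToGuest (the host path is
// hypothetical): the layer's folder name, i.e. its id, becomes the long
// in-guest path.
//
//	hostToGuest(`C:\ProgramData\docker\lcow\e3b0c44298fc1c14\layer.vhd`)
//	// -> "/tmp/e3b0c44298fc1c14"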

func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
	return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
}

type nopCloser struct {
	io.Reader
}

func (nopCloser) Close() error {
	return nil
}

type fileGetCloserFromSVM struct {
	id  string
	svm *serviceVM
	mvd *hcsshim.MappedVirtualDisk
	d   *Driver
}

func (fgc *fileGetCloserFromSVM) Close() error {
	if fgc.svm != nil {
		if fgc.mvd != nil {
			if err := fgc.svm.hotRemoveVHDs(*fgc.mvd); err != nil {
				// We just log this as we're going to tear down the SVM imminently unless in global mode
				logrus.Errorf("failed to remove mvd %s: %s", fgc.mvd.ContainerPath, err)
			}
		}
	}
	if fgc.d != nil && fgc.svm != nil && fgc.id != "" {
		if err := fgc.d.terminateServiceVM(fgc.id, fmt.Sprintf("diffgetter %s", fgc.id), false); err != nil {
			return err
		}
	}
	return nil
}

func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) {
	errOut := &bytes.Buffer{}
	outOut := &bytes.Buffer{}
	// Must map to the actual "short" container path where the SCSI disk was mounted
	actualContainerPath := fgc.svm.getShortContainerPath(fgc.mvd)
	if actualContainerPath == "" {
		return nil, fmt.Errorf("inconsistency detected: couldn't get short container path for %+v in utility VM %s", fgc.mvd, fgc.svm.config.Name)
	}
	file := path.Join(actualContainerPath, filename)
	if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil {
		logrus.Debugf("cat %s failed: %s", file, errOut.String())
		return nil, err
	}
	return nopCloser{bytes.NewReader(outOut.Bytes())}, nil
}

// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
	title := fmt.Sprintf("lcowdriver: diffgetter: %s", id)
	logrus.Debugf(title)

	ld, err := getLayerDetails(d.dir(id))
	if err != nil {
		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
		return nil, err
	}

	// Start the SVM with a mapped virtual disk. Note that if the SVM is
	// already running and we are in global mode, this will be hot-added.
	mvd := hcsshim.MappedVirtualDisk{
		HostPath:          ld.filename,
		ContainerPath:     hostToGuest(ld.filename),
		CreateInUtilityVM: true,
		ReadOnly:          true,
	}

	logrus.Debugf("%s: starting service VM", title)
	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diffgetter %s", id))
	if err != nil {
		return nil, err
	}

	logrus.Debugf("%s: waiting for svm to finish booting", title)
	err = svm.getStartError()
	if err != nil {
		d.terminateServiceVM(id, fmt.Sprintf("diffgetter %s", id), false)
		return nil, fmt.Errorf("%s: svm failed to boot: %s", title, err)
	}

	return &fileGetCloserFromSVM{
		id:  id,
		svm: svm,
		mvd: &mvd,
		d:   d}, nil
}