github.com/rita33cool1/iot-system-gateway@v0.0.0-20200911033302-e65bde238cc5/docker-engine/daemon/graphdriver/lcow/lcow_svm.go

// +build windows

package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/opengcs/client"
	"github.com/sirupsen/logrus"
)

// Code for all the service VM management for the LCOW graphdriver

var errVMisTerminating = errors.New("service VM is shutting down")
var errVMUnknown = errors.New("service vm id is unknown")
var errVMStillHasReference = errors.New("attempting to delete a VM that is still being used")

// serviceVMMap is the struct representing the id -> service VM mapping.
type serviceVMMap struct {
	sync.Mutex
	svms map[string]*serviceVMMapItem
}

// serviceVMMapItem is our internal structure representing an item in our
// map of service VMs we are maintaining.
type serviceVMMapItem struct {
	svm      *serviceVM // actual service vm object
	refCount int        // refcount for VM
}

type serviceVM struct {
	sync.Mutex                     // Serialises operations being performed in this service VM.
	scratchAttached bool           // Has a scratch been attached?
	config          *client.Config // Represents the service VM item.

	// Indicates that the vm is started
	startStatus chan interface{}
	startError  error

	// Indicates that the vm is stopped
	stopStatus chan interface{}
	stopError  error

	attachedVHDs map[string]int // Map ref counting all the VHDs we've hot-added/hot-removed.
	unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
}

// add will add an id to the service vm map. There are three cases:
//	- entry doesn't exist:
//		- add id to map and return a new vm that the caller can manually configure+start
//	- entry does exist:
//		- return vm in map and increment ref count
//	- entry does exist but the ref count is 0:
//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
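//
// A rough acquisition sketch (illustrative only; the real callers elsewhere in
// this driver configure and start the utility VM in more detail):
//
//	svm, existed, err := svmMap.add(id)
//	if err == errVMisTerminating {
//		// A previous instance is still stopping; wait for it to finish.
//		_ = svm.getStopError()
//	} else if err == nil && !existed {
//		// New entry: configure and start the utility VM, then unblock waiters
//		// with whatever error the start attempt produced (startErr is a placeholder).
//		svm.signalStartFinished(startErr)
//	}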
func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
	svmMap.Lock()
	defer svmMap.Unlock()
	if svm, ok := svmMap.svms[id]; ok {
		if svm.refCount == 0 {
			return svm.svm, true, errVMisTerminating
		}
		svm.refCount++
		return svm.svm, true, nil
	}

	// Doesn't exist, so create an empty svm to put into map and return
	newSVM := &serviceVM{
		startStatus:  make(chan interface{}),
		stopStatus:   make(chan interface{}),
		attachedVHDs: make(map[string]int),
		unionMounts:  make(map[string]int),
		config:       &client.Config{},
	}
	svmMap.svms[id] = &serviceVMMapItem{
		svm:      newSVM,
		refCount: 1,
	}
	return newSVM, false, nil
}

// get will get the service vm from the map. There are three cases:
//	- entry doesn't exist:
//		- return errVMUnknown
//	- entry does exist:
//		- return vm with no error
//	- entry does exist but the ref count is 0:
//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
	svmMap.Lock()
	defer svmMap.Unlock()
	svm, ok := svmMap.svms[id]
	if !ok {
		return nil, errVMUnknown
	}
	if svm.refCount == 0 {
		return svm.svm, errVMisTerminating
	}
	return svm.svm, nil
}

// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
//	- entry doesn't exist:
//		- return errVMUnknown
//	- entry does exist but the ref count is 0:
//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
//	- entry does exist but ref count is 1:
//		- return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
//		  and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
//	- entry does exist and ref count > 1:
//		- just reduce ref count and return svm
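//
// A rough release sketch (illustrative only; stopping the utility VM itself is
// driver-specific, so stopServiceVM below is a hypothetical helper):
//
//	svm, lastRef, err := svmMap.decrementRefCount(id)
//	if err == nil && lastRef {
//		stopErr := stopServiceVM(svm) // hypothetical: terminate the utility VM
//		if err := svmMap.deleteID(id); err != nil {
//			logrus.Warnf("failed to delete svm %s: %s", id, err)
//		}
//		svm.signalStopFinished(stopErr)
//	}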
func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
	svmMap.Lock()
	defer svmMap.Unlock()

	svm, ok := svmMap.svms[id]
	if !ok {
		return nil, false, errVMUnknown
	}
	if svm.refCount == 0 {
		return svm.svm, false, errVMisTerminating
	}
	svm.refCount--
	return svm.svm, svm.refCount == 0, nil
}

// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
	svmMap.Lock()
	defer svmMap.Unlock()

	svm, ok := svmMap.svms[id]
	if !ok {
		return nil, errVMUnknown
	}
	if svm.refCount == 0 {
		return svm.svm, errVMisTerminating
	}
	svm.refCount = 0
	return svm.svm, nil
}

// deleteID deletes the given ID from the map. If the refcount is not 0 or the
// VM does not exist, then this function returns an error.
func (svmMap *serviceVMMap) deleteID(id string) error {
	svmMap.Lock()
	defer svmMap.Unlock()
	svm, ok := svmMap.svms[id]
	if !ok {
		return errVMUnknown
	}
	if svm.refCount != 0 {
		return errVMStillHasReference
	}
	delete(svmMap.svms, id)
	return nil
}

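// signalStartFinished records the outcome of the start attempt and unblocks every
// caller waiting in getStartError. It must be called exactly once per service VM.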
func (svm *serviceVM) signalStartFinished(err error) {
	svm.Lock()
	svm.startError = err
	svm.Unlock()
	close(svm.startStatus)
}

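// getStartError blocks until the service VM has finished its start attempt and
// returns the error recorded by signalStartFinished, if any.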
func (svm *serviceVM) getStartError() error {
	<-svm.startStatus
	svm.Lock()
	defer svm.Unlock()
	return svm.startError
}

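// signalStopFinished records the outcome of stopping the service VM and unblocks
// every caller waiting in getStopError. It must be called exactly once per service VM.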
func (svm *serviceVM) signalStopFinished(err error) {
	svm.Lock()
	svm.stopError = err
	svm.Unlock()
	close(svm.stopStatus)
}

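// getStopError blocks until the service VM has been stopped and returns the
// error recorded by signalStopFinished, if any.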
func (svm *serviceVM) getStopError() error {
	<-svm.stopStatus
	svm.Lock()
	defer svm.Unlock()
	return svm.stopError
}

// hotAddVHDs waits for the service vm to start and then attaches the vhds.
func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
	if err := svm.getStartError(); err != nil {
		return err
	}
	return svm.hotAddVHDsAtStart(mvds...)
}

// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
	svm.Lock()
	defer svm.Unlock()
	for i, mvd := range mvds {
		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
			svm.attachedVHDs[mvd.HostPath]++
			continue
		}

		if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
			svm.hotRemoveVHDsNoLock(mvds[:i]...)
			return err
		}
		svm.attachedVHDs[mvd.HostPath] = 1
	}
	return nil
}

// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
// The service VM must not be locked when calling this function.
func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
	if err := svm.getStartError(); err != nil {
		return err
	}
	svm.Lock()
	defer svm.Unlock()
	return svm.hotRemoveVHDsNoLock(mvds...)
}

// hotRemoveVHDsNoLock removes VHDs from a service VM. When calling this function,
// the contract is that the service VM lock must be held.
func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) error {
	var retErr error
	for _, mvd := range mvds {
		if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
			// We continue instead of returning an error if we try to hot remove a non-existent VHD.
			// This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
			// defers the VM start to the first operation, it's possible that nothing has been hot-added
			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
			// don't find the vhd attached.
			continue
		}

		if svm.attachedVHDs[mvd.HostPath] > 1 {
			svm.attachedVHDs[mvd.HostPath]--
			continue
		}

		// last VHD, so remove from VM and map
		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
			delete(svm.attachedVHDs, mvd.HostPath)
		} else {
			// Take note of the error, but still continue to remove the other VHDs
			logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
			if retErr == nil {
				retErr = err
			}
		}
	}
	return retErr
}

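// createExt4VHDX waits for the service VM to start and then asks it to create an
// ext4-formatted VHDX of sizeGB at destFile, using cacheFile, when supplied, to
// speed up creation.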
func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
	if err := svm.getStartError(); err != nil {
		return err
	}

	svm.Lock()
	defer svm.Unlock()
	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
}

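// createUnionMount mounts the given layer disks at mountName inside the service VM,
// using an overlay mount whenever more than one layer is involved. Mounts are
// reference counted per mountName, so repeated calls for the same target only bump
// the count.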
func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
	if len(mvds) == 0 {
		return fmt.Errorf("createUnionMount: need at least 1 layer")
	}

	if err = svm.getStartError(); err != nil {
		return err
	}

	svm.Lock()
	defer svm.Unlock()
	if _, ok := svm.unionMounts[mountName]; ok {
		svm.unionMounts[mountName]++
		return nil
	}

	var lowerLayers []string
	if mvds[0].ReadOnly {
		lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
	}

	for i := 1; i < len(mvds); i++ {
		lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
	}

	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
	if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil {
		return err
	}

	var cmd string
	if len(mvds) == 1 {
		// `FROM SCRATCH` case and the only layer. No overlay required.
		cmd = fmt.Sprintf("mount %s %s", mvds[0].ContainerPath, mountName)
	} else if mvds[0].ReadOnly {
		// Readonly overlay. Note overlayfs separates lowerdir entries with ':'.
		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
			strings.Join(lowerLayers, ":"),
			mountName)
	} else {
		upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
		work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)

		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
			return err
		}

		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
			strings.Join(lowerLayers, ":"),
			upper,
			work,
			mountName)
	}

	logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
	if err = svm.runProcess(cmd, nil, nil, nil); err != nil {
		return err
	}

	svm.unionMounts[mountName] = 1
	return nil
}

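// deleteUnionMount decrements the reference count for mountName and unmounts it
// inside the service VM once the last reference is released.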
func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
	if err := svm.getStartError(); err != nil {
		return err
	}

	svm.Lock()
	defer svm.Unlock()
	if _, ok := svm.unionMounts[mountName]; !ok {
		return nil
	}

	if svm.unionMounts[mountName] > 1 {
		svm.unionMounts[mountName]--
		return nil
	}

	logrus.Debugf("Removing union mount %s", mountName)
	if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
		return err
	}

	delete(svm.unionMounts, mountName)
	return nil
}

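// runProcess runs command inside the service VM, waits for it to exit (bounded by
// the utility VM timeout) and returns an error if the process could not be run or
// exited with a non-zero exit code.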
func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
	process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
	if err != nil {
		return err
	}
	defer process.Close()

	process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds))
	exitCode, err := process.ExitCode()
	if err != nil {
		return err
	}

	if exitCode != 0 {
		return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
	}
	return nil
}