github.com/Cloud-Foundations/Dominator@v0.3.4/hypervisor/manager/vm.go (about)

     1  package manager
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/rand"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"net"
    10  	"net/http"
    11  	"os"
    12  	"os/exec"
    13  	"path/filepath"
    14  	"runtime"
    15  	"strings"
    16  	"syscall"
    17  	"time"
    18  
    19  	domlib "github.com/Cloud-Foundations/Dominator/dom/lib"
    20  	hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
    21  	imclient "github.com/Cloud-Foundations/Dominator/imageserver/client"
    22  	"github.com/Cloud-Foundations/Dominator/lib/errors"
    23  	"github.com/Cloud-Foundations/Dominator/lib/filesystem"
    24  	"github.com/Cloud-Foundations/Dominator/lib/filesystem/scanner"
    25  	"github.com/Cloud-Foundations/Dominator/lib/filesystem/util"
    26  	"github.com/Cloud-Foundations/Dominator/lib/filter"
    27  	"github.com/Cloud-Foundations/Dominator/lib/format"
    28  	"github.com/Cloud-Foundations/Dominator/lib/fsutil"
    29  	"github.com/Cloud-Foundations/Dominator/lib/hash"
    30  	"github.com/Cloud-Foundations/Dominator/lib/image"
    31  	"github.com/Cloud-Foundations/Dominator/lib/json"
    32  	"github.com/Cloud-Foundations/Dominator/lib/lockwatcher"
    33  	"github.com/Cloud-Foundations/Dominator/lib/log"
    34  	"github.com/Cloud-Foundations/Dominator/lib/log/filelogger"
    35  	"github.com/Cloud-Foundations/Dominator/lib/log/prefixlogger"
    36  	"github.com/Cloud-Foundations/Dominator/lib/log/serverlogger"
    37  	"github.com/Cloud-Foundations/Dominator/lib/mbr"
    38  	libnet "github.com/Cloud-Foundations/Dominator/lib/net"
    39  	"github.com/Cloud-Foundations/Dominator/lib/objectcache"
    40  	"github.com/Cloud-Foundations/Dominator/lib/objectserver"
    41  	objclient "github.com/Cloud-Foundations/Dominator/lib/objectserver/client"
    42  	"github.com/Cloud-Foundations/Dominator/lib/rsync"
    43  	"github.com/Cloud-Foundations/Dominator/lib/srpc"
    44  	"github.com/Cloud-Foundations/Dominator/lib/stringutil"
    45  	"github.com/Cloud-Foundations/Dominator/lib/tags"
    46  	"github.com/Cloud-Foundations/Dominator/lib/tags/tagmatcher"
    47  	"github.com/Cloud-Foundations/Dominator/lib/verstr"
    48  	"github.com/Cloud-Foundations/Dominator/lib/wsyscall"
    49  	proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
    50  	subproto "github.com/Cloud-Foundations/Dominator/proto/sub"
    51  	sublib "github.com/Cloud-Foundations/Dominator/sub/lib"
    52  )
    53  
    54  const (
    55  	bootlogFilename      = "bootlog"
    56  	lastPatchLogFilename = "lastPatchLog"
    57  	serialSockFilename   = "serial0.sock"
    58  
    59  	rebootJson = `{ "execute": "send-key",
    60       "arguments": { "keys": [ { "type": "qcode", "data": "ctrl" },
    61                                { "type": "qcode", "data": "alt" },
    62                                { "type": "qcode", "data": "delete" } ] } }
    63  `
    64  )
    65  
    66  var (
    67  	carriageReturnLiteral   = []byte{'\r'}
    68  	errorNoAccessToResource = errors.New("no access to resource")
    69  	newlineLiteral          = []byte{'\n'}
    70  	newlineReplacement      = []byte{'\\', 'n'}
    71  )
    72  
    73  func computeSize(minimumFreeBytes, roundupPower, size uint64) uint64 {
    74  	minBytes := size + size>>3 // 12% extra for good luck.
    75  	minBytes += minimumFreeBytes
    76  	if roundupPower < 24 {
    77  		roundupPower = 24 // 16 MiB.
    78  	}
    79  	imageUnits := minBytes >> roundupPower
    80  	if imageUnits<<roundupPower < minBytes {
    81  		imageUnits++
    82  	}
    83  	return imageUnits << roundupPower
    84  }
    85  
    86  func copyData(filename string, reader io.Reader, length uint64) error {
    87  	file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY,
    88  		fsutil.PrivateFilePerms)
    89  	if err != nil {
    90  		return err
    91  	}
    92  	defer file.Close()
    93  	if err := setVolumeSize(filename, length); err != nil {
    94  		return err
    95  	}
    96  	if reader == nil {
    97  		return nil
    98  	}
    99  	_, err = io.CopyN(file, reader, int64(length))
   100  	return err
   101  }
   102  
   103  func createTapDevice(bridge string) (*os.File, error) {
   104  	tapFile, tapName, err := libnet.CreateTapDevice()
   105  	if err != nil {
   106  		return nil, fmt.Errorf("error creating tap device: %s", err)
   107  	}
   108  	doAutoClose := true
   109  	defer func() {
   110  		if doAutoClose {
   111  			tapFile.Close()
   112  		}
   113  	}()
   114  	cmd := exec.Command("ip", "link", "set", tapName, "up")
   115  	if output, err := cmd.CombinedOutput(); err != nil {
   116  		return nil, fmt.Errorf("error upping: %s: %s", err, output)
   117  	}
   118  	cmd = exec.Command("ip", "link", "set", tapName, "master", bridge)
   119  	if output, err := cmd.CombinedOutput(); err != nil {
   120  		return nil, fmt.Errorf("error attaching: %s: %s", err, output)
   121  	}
   122  	doAutoClose = false
   123  	return tapFile, nil
   124  }
   125  
   126  func deleteFilesNotInImage(imgFS, vmFS *filesystem.FileSystem,
   127  	rootDir string, logger log.DebugLogger) error {
   128  	var totalBytes uint64
   129  	imgHashToInodesTable := imgFS.HashToInodesTable()
   130  	imgComputedFiles := make(map[string]struct{})
   131  	imgFS.ForEachFile(func(name string, inodeNumber uint64,
   132  		inode filesystem.GenericInode) error {
   133  		if _, ok := inode.(*filesystem.ComputedRegularInode); ok {
   134  			imgComputedFiles[name] = struct{}{}
   135  		}
   136  		return nil
   137  	})
   138  	for filename, inum := range vmFS.FilenameToInodeTable() {
   139  		if inode, ok := vmFS.InodeTable[inum].(*filesystem.RegularInode); ok {
   140  			if inode.Size < 1 {
   141  				continue
   142  			}
   143  			if _, isComputed := imgComputedFiles[filename]; isComputed {
   144  				continue
   145  			}
   146  			if _, inImage := imgHashToInodesTable[inode.Hash]; inImage {
   147  				continue
   148  			}
   149  			pathname := filepath.Join(rootDir, filename)
   150  			if err := os.Remove(pathname); err != nil {
   151  				return err
   152  			}
   153  			logger.Debugf(1, "pre-delete: %s\n", pathname)
   154  			totalBytes += inode.Size
   155  		}
   156  	}
   157  	logger.Debugf(0, "pre-delete: totalBytes: %s\n",
   158  		format.FormatBytes(totalBytes))
   159  	return nil
   160  }
   161  
   162  func extractKernel(volume proto.LocalVolume, extension string,
   163  	objectsGetter objectserver.ObjectsGetter, fs *filesystem.FileSystem,
   164  	bootInfo *util.BootInfoType) error {
   165  	dirent := bootInfo.KernelImageDirent
   166  	if dirent == nil {
   167  		return errors.New("no kernel image found")
   168  	}
   169  	inode, ok := dirent.Inode().(*filesystem.RegularInode)
   170  	if !ok {
   171  		return errors.New("kernel image is not a regular file")
   172  	}
   173  	inode.Size = 0
   174  	filename := filepath.Join(volume.DirectoryToCleanup, "kernel"+extension)
   175  	_, err := objectserver.LinkObject(filename, objectsGetter, inode.Hash)
   176  	if err != nil {
   177  		return err
   178  	}
   179  	dirent = bootInfo.InitrdImageDirent
   180  	if dirent != nil {
   181  		inode, ok := dirent.Inode().(*filesystem.RegularInode)
   182  		if !ok {
   183  			return errors.New("initrd image is not a regular file")
   184  		}
   185  		inode.Size = 0
   186  		filename := filepath.Join(volume.DirectoryToCleanup,
   187  			"initrd"+extension)
   188  		_, err = objectserver.LinkObject(filename, objectsGetter,
   189  			inode.Hash)
   190  		if err != nil {
   191  			return err
   192  		}
   193  	}
   194  	return nil
   195  }
   196  
   197  func maybeDrainAll(conn *srpc.Conn, request proto.CreateVmRequest) error {
   198  	if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
   199  		return err
   200  	}
   201  	if err := maybeDrainUserData(conn, request); err != nil {
   202  		return err
   203  	}
   204  	return nil
   205  }
   206  
   207  func maybeDrainImage(imageReader io.Reader, imageDataSize uint64) error {
   208  	if imageDataSize > 0 { // Drain data.
   209  		_, err := io.CopyN(ioutil.Discard, imageReader, int64(imageDataSize))
   210  		return err
   211  	}
   212  	return nil
   213  }
   214  
   215  func maybeDrainUserData(conn *srpc.Conn, request proto.CreateVmRequest) error {
   216  	if request.UserDataSize > 0 { // Drain data.
   217  		_, err := io.CopyN(ioutil.Discard, conn, int64(request.UserDataSize))
   218  		return err
   219  	}
   220  	return nil
   221  }
   222  
   223  // numSpecifiedVirtualCPUs calculates the number of virtual CPUs required for
   224  // the specified request. The request must be correct (i.e. sufficient vCPUs).
   225  func numSpecifiedVirtualCPUs(milliCPUs, vCPUs uint) uint {
   226  	nCpus := milliCPUs / 1000
   227  	if nCpus < 1 {
   228  		nCpus = 1
   229  	}
   230  	if nCpus*1000 < milliCPUs {
   231  		nCpus++
   232  	}
   233  	if nCpus < vCPUs {
   234  		nCpus = vCPUs
   235  	}
   236  	return nCpus
   237  }
   238  
   239  func readData(firstByte byte, moreBytes <-chan byte) []byte {
   240  	buffer := make([]byte, 1, len(moreBytes)+1)
   241  	buffer[0] = firstByte
   242  	for {
   243  		select {
   244  		case char, ok := <-moreBytes:
   245  			if !ok {
   246  				return buffer
   247  			}
   248  			buffer = append(buffer, char)
   249  		default:
   250  			return buffer
   251  		}
   252  	}
   253  }
   254  
   255  func readOne(objectsDir string, hashVal hash.Hash, length uint64,
   256  	reader io.Reader) error {
   257  	filename := filepath.Join(objectsDir, objectcache.HashToFilename(hashVal))
   258  	dirname := filepath.Dir(filename)
   259  	if err := os.MkdirAll(dirname, fsutil.DirPerms); err != nil {
   260  		return err
   261  	}
   262  	return fsutil.CopyToFile(filename, fsutil.PrivateFilePerms, reader, length)
   263  }
   264  
   265  // Returns bytes read up to a carriage return (which is discarded), and true if
   266  // the last byte was read, else false.
   267  func readUntilCarriageReturn(firstByte byte, moreBytes <-chan byte,
   268  	echo chan<- byte) ([]byte, bool) {
   269  	buffer := make([]byte, 1, len(moreBytes)+1)
   270  	buffer[0] = firstByte
   271  	echo <- firstByte
   272  	for char := range moreBytes {
   273  		echo <- char
   274  		if char == '\r' {
   275  			echo <- '\n'
   276  			return buffer, false
   277  		}
   278  		buffer = append(buffer, char)
   279  	}
   280  	return buffer, true
   281  }
   282  
   283  // removeFile will remove the specified filename. If the removal was successful
   284  // or the file does not exist, nil is returned, else an error is returned.
   285  func removeFile(filename string) error {
   286  	if err := os.Remove(filename); err != nil {
   287  		if !os.IsNotExist(err) {
   288  			return err
   289  		}
   290  	}
   291  	return nil
   292  }
   293  
   294  func setVolumeSize(filename string, size uint64) error {
   295  	if err := os.Truncate(filename, int64(size)); err != nil {
   296  		return err
   297  	}
   298  	return fsutil.Fallocate(filename, size)
   299  }
   300  
   301  func (m *Manager) acknowledgeVm(ipAddr net.IP,
   302  	authInfo *srpc.AuthInformation) error {
   303  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   304  	if err != nil {
   305  		return err
   306  	}
   307  	defer vm.mutex.Unlock()
   308  	vm.destroyTimer.Stop()
   309  	return nil
   310  }
   311  
   312  func (m *Manager) addVmVolumes(ipAddr net.IP, authInfo *srpc.AuthInformation,
   313  	volumeSizes []uint64) error {
   314  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   315  	if err != nil {
   316  		return err
   317  	}
   318  	defer vm.mutex.Unlock()
   319  	if vm.State != proto.StateStopped {
   320  		return errors.New("VM is not stopped")
   321  	}
   322  	volumes := make([]proto.Volume, 0, len(volumeSizes))
   323  	for _, size := range volumeSizes {
   324  		volumes = append(volumes, proto.Volume{Size: size})
   325  	}
   326  	volumeDirectories, err := vm.manager.getVolumeDirectories(0, 0, volumes,
   327  		vm.SpreadVolumes)
   328  	if err != nil {
   329  		return err
   330  	}
   331  	volumeLocations := make([]proto.LocalVolume, 0, len(volumes))
   332  	defer func() {
   333  		for _, volumeLocation := range volumeLocations {
   334  			os.Remove(volumeLocation.Filename)
   335  			os.Remove(volumeLocation.DirectoryToCleanup)
   336  		}
   337  	}()
   338  	for index, volumeDirectory := range volumeDirectories {
   339  		dirname := filepath.Join(volumeDirectory, vm.ipAddress)
   340  		filename := filepath.Join(dirname, indexToName(len(vm.Volumes)+index))
   341  		volumeLocation := proto.LocalVolume{
   342  			DirectoryToCleanup: dirname,
   343  			Filename:           filename,
   344  		}
   345  		if err := os.MkdirAll(dirname, fsutil.DirPerms); err != nil {
   346  			return err
   347  		}
   348  		cFlags := os.O_CREATE | os.O_EXCL | os.O_RDWR
   349  		file, err := os.OpenFile(filename, cFlags, fsutil.PrivateFilePerms)
   350  		if err != nil {
   351  			return err
   352  		} else {
   353  			file.Close()
   354  		}
   355  		if err := setVolumeSize(filename, volumeSizes[index]); err != nil {
   356  			return err
   357  		}
   358  		volumeLocations = append(volumeLocations, volumeLocation)
   359  	}
   360  	vm.VolumeLocations = append(vm.VolumeLocations, volumeLocations...)
   361  	volumeLocations = nil // Prevent cleanup. Thunderbirds are Go!
   362  	vm.Volumes = append(vm.Volumes, volumes...)
   363  	vm.writeAndSendInfo()
   364  	return nil
   365  }
   366  
   367  func (m *Manager) allocateVm(req proto.CreateVmRequest,
   368  	authInfo *srpc.AuthInformation) (*vmInfoType, error) {
   369  	dirname := filepath.Join(m.StateDir, "VMs")
   370  	if err := os.MkdirAll(dirname, fsutil.DirPerms); err != nil {
   371  		return nil, err
   372  	}
   373  	if err := req.ConsoleType.CheckValid(); err != nil {
   374  		return nil, err
   375  	}
   376  	if req.MemoryInMiB < 1 {
   377  		return nil, errors.New("no memory specified")
   378  	}
   379  	if req.MilliCPUs < 1 {
   380  		return nil, errors.New("no CPUs specified")
   381  	}
   382  	minimumCPUs := req.MilliCPUs / 1000
   383  	if req.VirtualCPUs > 0 && req.VirtualCPUs < minimumCPUs {
   384  		return nil, fmt.Errorf("VirtualCPUs must be at least %d", minimumCPUs)
   385  	}
   386  	subnetIDs := map[string]struct{}{req.SubnetId: {}}
   387  	for _, subnetId := range req.SecondarySubnetIDs {
   388  		if subnetId == "" {
   389  			return nil,
   390  				errors.New("cannot give unspecified secondary subnet ID")
   391  		}
   392  		if _, ok := subnetIDs[subnetId]; ok {
   393  			return nil,
   394  				fmt.Errorf("subnet: %s specified multiple times", subnetId)
   395  		}
   396  		subnetIDs[subnetId] = struct{}{}
   397  	}
   398  	address, subnetId, err := m.getFreeAddress(req.Address.IpAddress,
   399  		req.SubnetId, authInfo)
   400  	if err != nil {
   401  		return nil, err
   402  	}
   403  	addressesToFree := []proto.Address{address}
   404  	defer func() {
   405  		for _, address := range addressesToFree {
   406  			err := m.releaseAddressInPool(address)
   407  			if err != nil {
   408  				m.Logger.Println(err)
   409  			}
   410  		}
   411  	}()
   412  	var secondaryAddresses []proto.Address
   413  	for index, subnetId := range req.SecondarySubnetIDs {
   414  		var reqIpAddr net.IP
   415  		if index < len(req.SecondaryAddresses) {
   416  			reqIpAddr = req.SecondaryAddresses[index].IpAddress
   417  		}
   418  		secondaryAddress, _, err := m.getFreeAddress(reqIpAddr, subnetId,
   419  			authInfo)
   420  		if err != nil {
   421  			return nil, err
   422  		}
   423  		secondaryAddresses = append(secondaryAddresses, secondaryAddress)
   424  		addressesToFree = append(addressesToFree, secondaryAddress)
   425  	}
   426  	m.mutex.Lock()
   427  	defer m.mutex.Unlock()
   428  	if err := m.checkSufficientCPUWithLock(req.MilliCPUs); err != nil {
   429  		return nil, err
   430  	}
   431  	totalMemoryInMiB := getVmInfoMemoryInMiB(req.VmInfo)
   432  	err = m.checkSufficientMemoryWithLock(totalMemoryInMiB, nil)
   433  	if err != nil {
   434  		return nil, err
   435  	}
   436  	var ipAddress string
   437  	if len(address.IpAddress) < 1 {
   438  		ipAddress = "0.0.0.0"
   439  	} else {
   440  		ipAddress = address.IpAddress.String()
   441  	}
   442  	vm := &vmInfoType{
   443  		LocalVmInfo: proto.LocalVmInfo{
   444  			VmInfo: proto.VmInfo{
   445  				Address:            address,
   446  				CreatedOn:          time.Now(),
   447  				ConsoleType:        req.ConsoleType,
   448  				DestroyOnPowerdown: req.DestroyOnPowerdown,
   449  				DestroyProtection:  req.DestroyProtection,
   450  				DisableVirtIO:      req.DisableVirtIO,
   451  				ExtraKernelOptions: req.ExtraKernelOptions,
   452  				Hostname:           req.Hostname,
   453  				ImageName:          req.ImageName,
   454  				ImageURL:           req.ImageURL,
   455  				MemoryInMiB:        req.MemoryInMiB,
   456  				MilliCPUs:          req.MilliCPUs,
   457  				OwnerGroups:        req.OwnerGroups,
   458  				SpreadVolumes:      req.SpreadVolumes,
   459  				SecondaryAddresses: secondaryAddresses,
   460  				SecondarySubnetIDs: req.SecondarySubnetIDs,
   461  				State:              proto.StateStarting,
   462  				SubnetId:           subnetId,
   463  				Tags:               req.Tags,
   464  				VirtualCPUs:        req.VirtualCPUs,
   465  			},
   466  		},
   467  		manager:          m,
   468  		dirname:          filepath.Join(dirname, ipAddress),
   469  		ipAddress:        ipAddress,
   470  		logger:           prefixlogger.New(ipAddress+": ", m.Logger),
   471  		metadataChannels: make(map[chan<- string]struct{}),
   472  	}
   473  	m.vms[ipAddress] = vm
   474  	addressesToFree = nil
   475  	return vm, nil
   476  }
   477  
   478  func (m *Manager) becomePrimaryVmOwner(ipAddr net.IP,
   479  	authInfo *srpc.AuthInformation) error {
   480  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   481  	if err != nil {
   482  		return err
   483  	}
   484  	defer vm.mutex.Unlock()
   485  	if vm.OwnerUsers[0] == authInfo.Username {
   486  		return errors.New("you already are the primary owner")
   487  	}
   488  	ownerUsers := make([]string, 1, len(vm.OwnerUsers))
   489  	ownerUsers[0] = authInfo.Username
   490  	ownerUsers = append(ownerUsers, vm.OwnerUsers...)
   491  	vm.OwnerUsers, vm.ownerUsers = stringutil.DeduplicateList(ownerUsers, false)
   492  	vm.writeAndSendInfo()
   493  	return nil
   494  }
   495  
   496  func (m *Manager) changeVmConsoleType(ipAddr net.IP,
   497  	authInfo *srpc.AuthInformation, consoleType proto.ConsoleType) error {
   498  	if err := consoleType.CheckValid(); err != nil {
   499  		return err
   500  	}
   501  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   502  	if err != nil {
   503  		return err
   504  	}
   505  	defer vm.mutex.Unlock()
   506  	if vm.State != proto.StateStopped {
   507  		return errors.New("VM is not stopped")
   508  	}
   509  	vm.ConsoleType = consoleType
   510  	vm.writeAndSendInfo()
   511  	return nil
   512  }
   513  
   514  // changeVmCPUs returns true if the number of CPUs was changed.
   515  func (m *Manager) changeVmCPUs(vm *vmInfoType, req proto.ChangeVmSizeRequest) (
   516  	bool, error) {
   517  	if req.MilliCPUs < 1 {
   518  		req.MilliCPUs = vm.MilliCPUs
   519  	}
   520  	if req.VirtualCPUs < 1 {
   521  		req.VirtualCPUs = vm.VirtualCPUs
   522  	}
   523  	minimumCPUs := numSpecifiedVirtualCPUs(req.MilliCPUs, 0)
   524  	if req.VirtualCPUs > 0 && req.VirtualCPUs < minimumCPUs {
   525  		return false, fmt.Errorf("VirtualCPUs must be at least %d", minimumCPUs)
   526  	}
   527  	if req.MilliCPUs == vm.MilliCPUs && req.VirtualCPUs == vm.VirtualCPUs {
   528  		return false, nil
   529  	}
   530  	oldCPUs := numSpecifiedVirtualCPUs(vm.MilliCPUs, vm.VirtualCPUs)
   531  	newCPUs := numSpecifiedVirtualCPUs(req.MilliCPUs, req.VirtualCPUs)
   532  	if oldCPUs == newCPUs {
   533  		vm.MilliCPUs = req.MilliCPUs
   534  		vm.VirtualCPUs = req.VirtualCPUs
   535  		return true, nil
   536  	}
   537  	if vm.State != proto.StateStopped {
   538  		return false, errors.New("VM is not stopped")
   539  	}
   540  	if newCPUs <= oldCPUs {
   541  		vm.MilliCPUs = req.MilliCPUs
   542  		vm.VirtualCPUs = req.VirtualCPUs
   543  		return true, nil
   544  	}
   545  	m.mutex.Lock()
   546  	defer m.mutex.Unlock()
   547  	err := m.checkSufficientCPUWithLock(req.MilliCPUs - vm.MilliCPUs)
   548  	if err != nil {
   549  		return false, err
   550  	}
   551  	vm.MilliCPUs = req.MilliCPUs
   552  	vm.VirtualCPUs = req.VirtualCPUs
   553  	return true, nil
   554  }
   555  
   556  func (m *Manager) changeVmDestroyProtection(ipAddr net.IP,
   557  	authInfo *srpc.AuthInformation, destroyProtection bool) error {
   558  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   559  	if err != nil {
   560  		return err
   561  	}
   562  	defer vm.mutex.Unlock()
   563  	vm.DestroyProtection = destroyProtection
   564  	vm.writeAndSendInfo()
   565  	return nil
   566  }
   567  
   568  // changeVmMemory returns true if the memory size was changed.
   569  func (m *Manager) changeVmMemory(vm *vmInfoType,
   570  	memoryInMiB uint64) (bool, error) {
   571  	if memoryInMiB == vm.MemoryInMiB {
   572  		return false, nil
   573  	}
   574  	if vm.State != proto.StateStopped {
   575  		return false, errors.New("VM is not stopped")
   576  	}
   577  	changed := false
   578  	if memoryInMiB < vm.MemoryInMiB {
   579  		vm.MemoryInMiB = memoryInMiB
   580  		changed = true
   581  	} else if memoryInMiB > vm.MemoryInMiB {
   582  		m.mutex.Lock()
   583  		err := m.checkSufficientMemoryWithLock(memoryInMiB-vm.MemoryInMiB, vm)
   584  		if err == nil {
   585  			vm.MemoryInMiB = memoryInMiB
   586  			changed = true
   587  		}
   588  		m.mutex.Unlock()
   589  		if err != nil {
   590  			return changed, err
   591  		}
   592  	}
   593  	return changed, nil
   594  }
   595  
   596  func (m *Manager) changeVmOwnerUsers(ipAddr net.IP,
   597  	authInfo *srpc.AuthInformation, extraUsers []string) error {
   598  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   599  	if err != nil {
   600  		return err
   601  	}
   602  	defer vm.mutex.Unlock()
   603  	ownerUsers := make([]string, 1, len(extraUsers)+1)
   604  	ownerUsers[0] = vm.OwnerUsers[0]
   605  	ownerUsers = append(ownerUsers, extraUsers...)
   606  	vm.OwnerUsers, vm.ownerUsers = stringutil.DeduplicateList(ownerUsers, false)
   607  	vm.writeAndSendInfo()
   608  	return nil
   609  }
   610  
   611  func (m *Manager) changeVmSize(authInfo *srpc.AuthInformation,
   612  	req proto.ChangeVmSizeRequest) error {
   613  	vm, err := m.getVmLockAndAuth(req.IpAddress, true, authInfo, nil)
   614  	if err != nil {
   615  		return err
   616  	}
   617  	defer vm.mutex.Unlock()
   618  	changed := false
   619  	if req.MemoryInMiB > 0 {
   620  		if _changed, e := m.changeVmMemory(vm, req.MemoryInMiB); e != nil {
   621  			err = e
   622  		} else if _changed {
   623  			changed = true
   624  		}
   625  	}
   626  	if (req.MilliCPUs > 0 || req.VirtualCPUs > 0) && err == nil {
   627  		if _changed, _err := m.changeVmCPUs(vm, req); _err != nil {
   628  			err = _err
   629  		} else if _changed {
   630  			changed = true
   631  		}
   632  	}
   633  	if changed {
   634  		vm.writeAndSendInfo()
   635  	}
   636  	return err
   637  }
   638  
   639  func (m *Manager) changeVmTags(ipAddr net.IP, authInfo *srpc.AuthInformation,
   640  	tgs tags.Tags) error {
   641  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   642  	if err != nil {
   643  		return err
   644  	}
   645  	defer vm.mutex.Unlock()
   646  	vm.Tags = tgs
   647  	vm.writeAndSendInfo()
   648  	return nil
   649  }
   650  
   651  func (m *Manager) changeVmVolumeSize(ipAddr net.IP,
   652  	authInfo *srpc.AuthInformation, index uint, size uint64) error {
   653  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   654  	if err != nil {
   655  		return err
   656  	}
   657  	defer vm.mutex.Unlock()
   658  	if index >= uint(len(vm.Volumes)) {
   659  		return errors.New("invalid volume index")
   660  	}
   661  	volume := vm.Volumes[index]
   662  	if volume.Format != proto.VolumeFormatRaw {
   663  		return errors.New("cannot resize non-RAW volumes")
   664  	}
   665  	localVolume := vm.VolumeLocations[index]
   666  	if size == volume.Size {
   667  		return nil
   668  	}
   669  	if vm.State != proto.StateStopped {
   670  		return errors.New("VM is not stopped")
   671  	}
   672  	if size < volume.Size {
   673  		if err := shrink2fs(localVolume.Filename, size, vm.logger); err != nil {
   674  			return err
   675  		}
   676  		if err := setVolumeSize(localVolume.Filename, size); err != nil {
   677  			return err
   678  		}
   679  		vm.Volumes[index].Size = size
   680  		vm.writeAndSendInfo()
   681  		return nil
   682  	}
   683  	var statbuf syscall.Statfs_t
   684  	if err := syscall.Statfs(localVolume.Filename, &statbuf); err != nil {
   685  		return err
   686  	}
   687  	if size-volume.Size > uint64(statbuf.Bavail*uint64(statbuf.Bsize)) {
   688  		return errors.New("not enough free space")
   689  	}
   690  	if err := setVolumeSize(localVolume.Filename, size); err != nil {
   691  		return err
   692  	}
   693  	vm.Volumes[index].Size = size
   694  	vm.writeAndSendInfo()
   695  	// Try and grow an ext{2,3,4} file-system. If this fails, return the error
   696  	// to the caller, but the volume will have been expanded. Someone else can
   697  	// deal with adjusting partitions and growing file-systems.
   698  	return grow2fs(localVolume.Filename, vm.logger)
   699  }
   700  
   701  func (m *Manager) checkVmHasHealthAgent(ipAddr net.IP) (bool, error) {
   702  	vm, err := m.getVmAndLock(ipAddr, false)
   703  	if err != nil {
   704  		return false, err
   705  	}
   706  	defer vm.mutex.RUnlock()
   707  	if vm.State != proto.StateRunning {
   708  		return false, nil
   709  	}
   710  	return vm.hasHealthAgent, nil
   711  }
   712  
   713  func (m *Manager) commitImportedVm(ipAddr net.IP,
   714  	authInfo *srpc.AuthInformation) error {
   715  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   716  	if err != nil {
   717  		return err
   718  	}
   719  	defer vm.mutex.Unlock()
   720  	if !vm.Uncommitted {
   721  		return fmt.Errorf("%s is already committed", ipAddr)
   722  	}
   723  	if err := m.registerAddress(vm.Address); err != nil {
   724  		return err
   725  	}
   726  	vm.Uncommitted = false
   727  	vm.writeAndSendInfo()
   728  	return nil
   729  }
   730  
   731  func (m *Manager) connectToVmConsole(ipAddr net.IP,
   732  	authInfo *srpc.AuthInformation) (net.Conn, error) {
   733  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   734  	if err != nil {
   735  		return nil, err
   736  	}
   737  	defer vm.mutex.Unlock()
   738  	if vm.State != proto.StateRunning {
   739  		return nil, errors.New("VM is not running")
   740  	}
   741  	if vm.ConsoleType != proto.ConsoleVNC {
   742  		return nil, errors.New("VNC console is not enabled")
   743  	}
   744  	console, err := net.Dial("unix", filepath.Join(vm.dirname, "vnc"))
   745  	if err != nil {
   746  		return nil, err
   747  	}
   748  	return console, nil
   749  }
   750  
   751  func (m *Manager) connectToVmManager(ipAddr net.IP) (
   752  	chan<- byte, <-chan byte, error) {
   753  	input := make(chan byte, 256)
   754  	vm, err := m.getVmAndLock(ipAddr, true)
   755  	if err != nil {
   756  		return nil, nil, err
   757  	}
   758  	defer vm.mutex.Unlock()
   759  	if vm.State != proto.StateRunning {
   760  		return nil, nil, errors.New("VM is not running")
   761  	}
   762  	commandInput := vm.commandInput
   763  	if commandInput == nil {
   764  		return nil, nil, errors.New("no commandInput for VM")
   765  	}
   766  	// Drain any previous output.
   767  	for keepReading := true; keepReading; {
   768  		select {
   769  		case <-vm.commandOutput:
   770  		default:
   771  			keepReading = false
   772  			break
   773  		}
   774  	}
   775  	go func(input <-chan byte, output chan<- string) {
   776  		for char := range input {
   777  			if char == '\r' {
   778  				continue
   779  			}
   780  			buffer, gotLast := readUntilCarriageReturn(char, input,
   781  				vm.commandOutput)
   782  			output <- "\\" + string(buffer)
   783  			if gotLast {
   784  				break
   785  			}
   786  		}
   787  		vm.logger.Debugln(0, "input channel for manager closed")
   788  	}(input, vm.commandInput)
   789  	return input, vm.commandOutput, nil
   790  }
   791  
   792  func (m *Manager) connectToVmSerialPort(ipAddr net.IP,
   793  	authInfo *srpc.AuthInformation,
   794  	portNumber uint) (chan<- byte, <-chan byte, error) {
   795  	if portNumber > 0 {
   796  		return nil, nil, errors.New("only one serial port is supported")
   797  	}
   798  	input := make(chan byte, 256)
   799  	output := make(chan byte, 16<<10)
   800  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
   801  	if err != nil {
   802  		return nil, nil, err
   803  	}
   804  	defer vm.mutex.Unlock()
   805  	if vm.State != proto.StateRunning {
   806  		return nil, nil, errors.New("VM is not running")
   807  	}
   808  	serialInput := vm.serialInput
   809  	if serialInput == nil {
   810  		return nil, nil, errors.New("no serial input device for VM")
   811  	}
   812  	if vm.serialOutput != nil {
   813  		return nil, nil, errors.New("VM already has a serial port connection")
   814  	}
   815  	vm.serialOutput = output
   816  	go func(input <-chan byte, output chan<- byte) {
   817  		for char := range input {
   818  			buffer := readData(char, input)
   819  			if _, err := serialInput.Write(buffer); err != nil {
   820  				vm.logger.Printf("error writing to serial port: %s\n", err)
   821  				break
   822  			}
   823  		}
   824  		vm.logger.Debugln(0, "input channel for console closed")
   825  		vm.mutex.Lock()
   826  		if vm.serialOutput != nil {
   827  			close(vm.serialOutput)
   828  			vm.serialOutput = nil
   829  		}
   830  		vm.mutex.Unlock()
   831  	}(input, output)
   832  	return input, output, nil
   833  }
   834  
   835  func (m *Manager) copyVm(conn *srpc.Conn, request proto.CopyVmRequest) error {
   836  	m.Logger.Debugf(1, "CopyVm(%s) starting\n", conn.Username())
   837  	hypervisor, err := srpc.DialHTTP("tcp", request.SourceHypervisor, 0)
   838  	if err != nil {
   839  		return err
   840  	}
   841  	defer hypervisor.Close()
   842  	defer func() {
   843  		req := proto.DiscardVmAccessTokenRequest{
   844  			AccessToken: request.AccessToken,
   845  			IpAddress:   request.IpAddress}
   846  		var reply proto.DiscardVmAccessTokenResponse
   847  		hypervisor.RequestReply("Hypervisor.DiscardVmAccessToken",
   848  			req, &reply)
   849  	}()
   850  	getInfoRequest := proto.GetVmInfoRequest{request.IpAddress}
   851  	var getInfoReply proto.GetVmInfoResponse
   852  	err = hypervisor.RequestReply("Hypervisor.GetVmInfo", getInfoRequest,
   853  		&getInfoReply)
   854  	if err != nil {
   855  		return err
   856  	}
   857  	switch getInfoReply.VmInfo.State {
   858  	case proto.StateStopped, proto.StateRunning:
   859  	default:
   860  		return errors.New("VM is not stopped or running")
   861  	}
   862  	accessToken := request.AccessToken
   863  	ownerUsers := make([]string, 1, len(request.OwnerUsers)+1)
   864  	ownerUsers[0] = conn.Username()
   865  	if ownerUsers[0] == "" {
   866  		return errors.New("no authentication data")
   867  	}
   868  	ownerUsers = append(ownerUsers, request.OwnerUsers...)
   869  	vmInfo := request.VmInfo
   870  	vmInfo.Address = proto.Address{}
   871  	vmInfo.SecondaryAddresses = nil
   872  	vmInfo.Uncommitted = false
   873  	vmInfo.Volumes = getInfoReply.VmInfo.Volumes
   874  	vm, err := m.allocateVm(proto.CreateVmRequest{VmInfo: vmInfo},
   875  		conn.GetAuthInformation())
   876  	if err != nil {
   877  		return err
   878  	}
   879  	defer func() { // Evaluate vm at return time, not defer time.
   880  		vm.cleanup()
   881  	}()
   882  	vm.OwnerUsers, vm.ownerUsers = stringutil.DeduplicateList(ownerUsers, false)
   883  	vm.Volumes = vmInfo.Volumes
   884  	if !request.SkipMemoryCheck {
   885  		if err := <-tryAllocateMemory(vmInfo.MemoryInMiB); err != nil {
   886  			return err
   887  		}
   888  	}
   889  	var secondaryVolumes []proto.Volume
   890  	for index, volume := range vmInfo.Volumes {
   891  		if index > 0 {
   892  			secondaryVolumes = append(secondaryVolumes, volume)
   893  		}
   894  	}
   895  	err = vm.setupVolumes(vmInfo.Volumes[0].Size, vmInfo.Volumes[0].Type,
   896  		secondaryVolumes, vmInfo.SpreadVolumes)
   897  	if err != nil {
   898  		return err
   899  	}
   900  	if err := os.Mkdir(vm.dirname, fsutil.DirPerms); err != nil {
   901  		return err
   902  	}
   903  	// Begin copying over the volumes.
   904  	err = sendVmCopyMessage(conn, "initial volume(s) copy")
   905  	if err != nil {
   906  		return err
   907  	}
   908  	err = vm.migrateVmVolumes(hypervisor, request.IpAddress, accessToken, true)
   909  	if err != nil {
   910  		return err
   911  	}
   912  	if getInfoReply.VmInfo.State != proto.StateStopped {
   913  		err = sendVmCopyMessage(conn, "stopping VM")
   914  		if err != nil {
   915  			return err
   916  		}
   917  		err := hyperclient.StopVm(hypervisor, request.IpAddress,
   918  			request.AccessToken)
   919  		if err != nil {
   920  			return err
   921  		}
   922  		defer hyperclient.StartVm(hypervisor, request.IpAddress, accessToken)
   923  		err = sendVmCopyMessage(conn, "update volume(s)")
   924  		if err != nil {
   925  			return err
   926  		}
   927  		err = vm.migrateVmVolumes(hypervisor, request.IpAddress, accessToken,
   928  			false)
   929  		if err != nil {
   930  			return err
   931  		}
   932  	}
   933  	err = migratevmUserData(hypervisor,
   934  		filepath.Join(vm.dirname, UserDataFile),
   935  		request.IpAddress, accessToken)
   936  	if err != nil {
   937  		return err
   938  	}
   939  	vm.setState(proto.StateStopped)
   940  	vm.destroyTimer = time.AfterFunc(time.Second*15, vm.autoDestroy)
   941  	response := proto.CopyVmResponse{
   942  		Final:     true,
   943  		IpAddress: vm.Address.IpAddress,
   944  	}
   945  	if err := conn.Encode(response); err != nil {
   946  		return err
   947  	}
   948  	vm.setupLockWatcher()
   949  	vm = nil // Cancel cleanup.
   950  	m.Logger.Debugln(1, "CopyVm() finished")
   951  	return nil
   952  }
   953  
   954  func (m *Manager) createVm(conn *srpc.Conn) error {
   955  
   956  	sendError := func(conn *srpc.Conn, err error) error {
   957  		m.Logger.Debugf(1, "CreateVm(%s) failed: %s\n", conn.Username(), err)
   958  		return conn.Encode(proto.CreateVmResponse{Error: err.Error()})
   959  	}
   960  
   961  	var ipAddressToSend net.IP
   962  	sendUpdate := func(conn *srpc.Conn, message string) error {
   963  		response := proto.CreateVmResponse{
   964  			IpAddress:       ipAddressToSend,
   965  			ProgressMessage: message,
   966  		}
   967  		if err := conn.Encode(response); err != nil {
   968  			return err
   969  		}
   970  		return conn.Flush()
   971  	}
   972  
   973  	m.Logger.Debugf(1, "CreateVm(%s) starting\n", conn.Username())
   974  	var request proto.CreateVmRequest
   975  	if err := conn.Decode(&request); err != nil {
   976  		return err
   977  	}
   978  	if m.disabled {
   979  		if err := maybeDrainAll(conn, request); err != nil {
   980  			return err
   981  		}
   982  		return sendError(conn, errors.New("Hypervisor is disabled"))
   983  	}
   984  	ownerUsers := make([]string, 1, len(request.OwnerUsers)+1)
   985  	ownerUsers[0] = conn.Username()
   986  	if ownerUsers[0] == "" {
   987  		if err := maybeDrainAll(conn, request); err != nil {
   988  			return err
   989  		}
   990  		return sendError(conn, errors.New("no authentication data"))
   991  	}
   992  	ownerUsers = append(ownerUsers, request.OwnerUsers...)
   993  	var identityExpires time.Time
   994  	var identityName string
   995  	if len(request.IdentityCertificate) > 0 && len(request.IdentityKey) > 0 {
   996  		var err error
   997  		identityName, identityExpires, err = validateIdentityKeyPair(
   998  			request.IdentityCertificate, request.IdentityKey, ownerUsers[0])
   999  		if err != nil {
  1000  			if err := maybeDrainAll(conn, request); err != nil {
  1001  				return err
  1002  			}
  1003  			return sendError(conn, err)
  1004  		}
  1005  	}
  1006  	vm, err := m.allocateVm(request, conn.GetAuthInformation())
  1007  	if err != nil {
  1008  		if err := maybeDrainAll(conn, request); err != nil {
  1009  			return err
  1010  		}
  1011  		return sendError(conn, err)
  1012  	}
  1013  	defer func() {
  1014  		vm.cleanup() // Evaluate vm at return time, not defer time.
  1015  	}()
  1016  	vm.IdentityExpires = identityExpires
  1017  	vm.IdentityName = identityName
  1018  	var memoryError <-chan error
  1019  	if !request.SkipMemoryCheck {
  1020  		memoryError = tryAllocateMemory(getVmInfoMemoryInMiB(request.VmInfo))
  1021  	}
  1022  	vm.OwnerUsers, vm.ownerUsers = stringutil.DeduplicateList(ownerUsers, false)
  1023  	if err := os.Mkdir(vm.dirname, fsutil.DirPerms); err != nil {
  1024  		if err := maybeDrainAll(conn, request); err != nil {
  1025  			return err
  1026  		}
  1027  		return sendError(conn, err)
  1028  	}
  1029  	err = writeKeyPair(request.IdentityCertificate, request.IdentityKey,
  1030  		filepath.Join(vm.dirname, IdentityCertFile),
  1031  		filepath.Join(vm.dirname, IdentityKeyFile))
  1032  	if err != nil {
  1033  		if err := maybeDrainAll(conn, request); err != nil {
  1034  			return err
  1035  		}
  1036  		return sendError(conn, err)
  1037  	}
  1038  	var rootVolumeType proto.VolumeType
  1039  	if len(request.Volumes) > 0 {
  1040  		rootVolumeType = request.Volumes[0].Type
  1041  	}
  1042  	if request.ImageName != "" {
  1043  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  1044  			return err
  1045  		}
  1046  		if err := sendUpdate(conn, "getting image"); err != nil {
  1047  			return err
  1048  		}
  1049  		client, img, imageName, err := m.getImage(request.ImageName,
  1050  			request.ImageTimeout)
  1051  		if err != nil {
  1052  			return sendError(conn, err)
  1053  		}
  1054  		defer client.Close()
  1055  		fs := img.FileSystem
  1056  		vm.ImageName = imageName
  1057  		size := computeSize(request.MinimumFreeBytes, request.RoundupPower,
  1058  			fs.EstimateUsage(0))
  1059  		err = vm.setupVolumes(size, rootVolumeType, request.SecondaryVolumes,
  1060  			request.SpreadVolumes)
  1061  		if err != nil {
  1062  			return sendError(conn, err)
  1063  		}
  1064  		if err := sendUpdate(conn, "unpacking image: "+imageName); err != nil {
  1065  			return err
  1066  		}
  1067  		writeRawOptions := util.WriteRawOptions{
  1068  			ExtraKernelOptions: request.ExtraKernelOptions,
  1069  			InitialImageName:   imageName,
  1070  			MinimumFreeBytes:   request.MinimumFreeBytes,
  1071  			OverlayDirectories: request.OverlayDirectories,
  1072  			OverlayFiles:       request.OverlayFiles,
  1073  			RootLabel:          vm.rootLabel(false),
  1074  			RoundupPower:       request.RoundupPower,
  1075  		}
  1076  		err = m.writeRaw(vm.VolumeLocations[0], "", client, fs, writeRawOptions,
  1077  			request.SkipBootloader)
  1078  		if err != nil {
  1079  			return sendError(conn, err)
  1080  		}
  1081  		if fi, err := os.Stat(vm.VolumeLocations[0].Filename); err != nil {
  1082  			return sendError(conn, err)
  1083  		} else {
  1084  			vm.Volumes = []proto.Volume{{Size: uint64(fi.Size())}}
  1085  		}
  1086  	} else if request.ImageDataSize > 0 {
  1087  		err := vm.copyRootVolume(request, conn, request.ImageDataSize,
  1088  			rootVolumeType)
  1089  		if err != nil {
  1090  			return err
  1091  		}
  1092  	} else if request.ImageURL != "" {
  1093  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  1094  			return err
  1095  		}
  1096  		httpResponse, err := http.Get(request.ImageURL)
  1097  		if err != nil {
  1098  			return sendError(conn, err)
  1099  		}
  1100  		defer httpResponse.Body.Close()
  1101  		if httpResponse.StatusCode != http.StatusOK {
  1102  			return sendError(conn, errors.New(httpResponse.Status))
  1103  		}
  1104  		if httpResponse.ContentLength < 0 {
  1105  			return sendError(conn,
  1106  				errors.New("ContentLength from: "+request.ImageURL))
  1107  		}
  1108  		err = vm.copyRootVolume(request, httpResponse.Body,
  1109  			uint64(httpResponse.ContentLength), rootVolumeType)
  1110  		if err != nil {
  1111  			return sendError(conn, err)
  1112  		}
  1113  	} else if request.MinimumFreeBytes > 0 { // Create empty root volume.
  1114  		err = vm.copyRootVolume(request, nil, request.MinimumFreeBytes,
  1115  			rootVolumeType)
  1116  		if err != nil {
  1117  			return sendError(conn, err)
  1118  		}
  1119  	} else {
  1120  		return sendError(conn, errors.New("no image specified"))
  1121  	}
  1122  	vm.Volumes[0].Type = rootVolumeType
  1123  	if request.UserDataSize > 0 {
  1124  		filename := filepath.Join(vm.dirname, UserDataFile)
  1125  		if err := copyData(filename, conn, request.UserDataSize); err != nil {
  1126  			return sendError(conn, err)
  1127  		}
  1128  	}
  1129  	if len(request.SecondaryVolumes) > 0 {
  1130  		err := sendUpdate(conn, "creating secondary volumes")
  1131  		if err != nil {
  1132  			return err
  1133  		}
  1134  		for index, volume := range request.SecondaryVolumes {
  1135  			fname := vm.VolumeLocations[index+1].Filename
  1136  			var dataReader io.Reader
  1137  			if request.SecondaryVolumesData {
  1138  				dataReader = conn
  1139  			}
  1140  			if err := copyData(fname, dataReader, volume.Size); err != nil {
  1141  				return sendError(conn, err)
  1142  			}
  1143  			if dataReader == nil && index < len(request.SecondaryVolumesInit) {
  1144  				vinit := request.SecondaryVolumesInit[index]
  1145  				err := util.MakeExt4fsWithParams(fname, util.MakeExt4fsParams{
  1146  					BytesPerInode:            vinit.BytesPerInode,
  1147  					Label:                    vinit.Label,
  1148  					ReservedBlocksPercentage: vinit.ReservedBlocksPercentage,
  1149  					Size:                     volume.Size,
  1150  				},
  1151  					vm.logger)
  1152  				if err != nil {
  1153  					return sendError(conn, err)
  1154  				}
  1155  				if err := setVolumeSize(fname, volume.Size); err != nil {
  1156  					return err
  1157  				}
  1158  			}
  1159  			vm.Volumes = append(vm.Volumes, volume)
  1160  		}
  1161  	}
  1162  	if memoryError != nil {
  1163  		if len(memoryError) < 1 {
  1164  			msg := "waiting for test memory allocation"
  1165  			sendUpdate(conn, msg)
  1166  			vm.logger.Debugln(0, msg)
  1167  		}
  1168  		if err := <-memoryError; err != nil {
  1169  			return sendError(conn, err)
  1170  		}
  1171  	}
  1172  	var dhcpTimedOut bool
  1173  	if request.DoNotStart {
  1174  		vm.setState(proto.StateStopped)
  1175  	} else {
  1176  		if vm.ipAddress == "" {
  1177  			ipAddressToSend = net.ParseIP(vm.ipAddress)
  1178  			if err := sendUpdate(conn, "starting VM"); err != nil {
  1179  				return err
  1180  			}
  1181  		} else {
  1182  			ipAddressToSend = net.ParseIP(vm.ipAddress)
  1183  			if err := sendUpdate(conn, "starting VM "+vm.ipAddress); err != nil {
  1184  				return err
  1185  			}
  1186  		}
  1187  		dhcpTimedOut, err = vm.startManaging(request.DhcpTimeout,
  1188  			request.EnableNetboot, false)
  1189  		if err != nil {
  1190  			return sendError(conn, err)
  1191  		}
  1192  	}
  1193  	vm.destroyTimer = time.AfterFunc(time.Second*15, vm.autoDestroy)
  1194  	response := proto.CreateVmResponse{
  1195  		DhcpTimedOut: dhcpTimedOut,
  1196  		Final:        true,
  1197  		IpAddress:    net.ParseIP(vm.ipAddress),
  1198  	}
  1199  	if err := conn.Encode(response); err != nil {
  1200  		return err
  1201  	}
  1202  	vm.setupLockWatcher()
  1203  	m.Logger.Debugf(1, "CreateVm(%s) finished, IP=%s\n",
  1204  		conn.Username(), vm.ipAddress)
  1205  	vm = nil // Cancel cleanup.
  1206  	return nil
  1207  }
  1208  
  1209  func (m *Manager) debugVmImage(conn *srpc.Conn,
  1210  	authInfo *srpc.AuthInformation) error {
  1211  
  1212  	sendError := func(conn *srpc.Conn, err error) error {
  1213  		return conn.Encode(proto.DebugVmImageResponse{Error: err.Error()})
  1214  	}
  1215  
  1216  	sendUpdate := func(conn *srpc.Conn, message string) error {
  1217  		response := proto.DebugVmImageResponse{
  1218  			ProgressMessage: message,
  1219  		}
  1220  		if err := conn.Encode(response); err != nil {
  1221  			return err
  1222  		}
  1223  		return conn.Flush()
  1224  	}
  1225  
  1226  	var request proto.DebugVmImageRequest
  1227  	if err := conn.Decode(&request); err != nil {
  1228  		return err
  1229  	}
  1230  	m.Logger.Debugf(1, "DebugVmImage(%s) starting\n", request.IpAddress)
  1231  	vm, err := m.getVmLockAndAuth(request.IpAddress, true, authInfo, nil)
  1232  	if err != nil {
  1233  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  1234  			return err
  1235  		}
  1236  		return sendError(conn, err)
  1237  	}
  1238  	vm.blockMutations = true
  1239  	switch vm.State {
  1240  	case proto.StateStopped:
  1241  	case proto.StateRunning:
  1242  		if len(vm.Address.IpAddress) < 1 {
  1243  			err = errors.New("cannot stop VM with externally managed lease")
  1244  		}
  1245  	default:
  1246  		err = errors.New("VM is not running or stopped")
  1247  	}
  1248  	if err != nil {
  1249  		vm.allowMutationsAndUnlock()
  1250  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  1251  			return err
  1252  		}
  1253  		return sendError(conn, err)
  1254  	}
  1255  	rootFilename := vm.VolumeLocations[0].Filename + ".debug"
  1256  	vm.mutex.Unlock()
  1257  	haveLock := false
  1258  	doCleanup := true
  1259  	defer func() {
  1260  		if !haveLock {
  1261  			vm.mutex.Lock()
  1262  		}
  1263  		if doCleanup {
  1264  			os.Remove(rootFilename)
  1265  		}
  1266  		vm.allowMutationsAndUnlock()
  1267  	}()
  1268  	if request.ImageName != "" {
  1269  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  1270  			return sendError(conn, err)
  1271  		}
  1272  		if err := sendUpdate(conn, "getting image"); err != nil {
  1273  			return sendError(conn, err)
  1274  		}
  1275  		client, img, imageName, err := m.getImage(request.ImageName,
  1276  			request.ImageTimeout)
  1277  		if err != nil {
  1278  			return sendError(conn, err)
  1279  		}
  1280  		defer client.Close()
  1281  		fs := img.FileSystem
  1282  		if err := sendUpdate(conn, "unpacking image: "+imageName); err != nil {
  1283  			return err
  1284  		}
  1285  		writeRawOptions := util.WriteRawOptions{
  1286  			InitialImageName: imageName,
  1287  			MinimumFreeBytes: request.MinimumFreeBytes,
  1288  			OverlayFiles:     request.OverlayFiles,
  1289  			RootLabel:        vm.rootLabel(true),
  1290  			RoundupPower:     request.RoundupPower,
  1291  		}
  1292  		err = m.writeRaw(vm.VolumeLocations[0], ".debug", client, fs,
  1293  			writeRawOptions, false)
  1294  		if err != nil {
  1295  			return sendError(conn, err)
  1296  		}
  1297  	} else if request.ImageDataSize > 0 {
  1298  		err := copyData(rootFilename, conn, request.ImageDataSize)
  1299  		if err != nil {
  1300  			return sendError(conn, err)
  1301  		}
  1302  	} else if request.ImageURL != "" {
  1303  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  1304  			return sendError(conn, err)
  1305  		}
  1306  		httpResponse, err := http.Get(request.ImageURL)
  1307  		if err != nil {
  1308  			return sendError(conn, err)
  1309  		}
  1310  		defer httpResponse.Body.Close()
  1311  		if httpResponse.StatusCode != http.StatusOK {
  1312  			return sendError(conn, errors.New(httpResponse.Status))
  1313  		}
  1314  		if httpResponse.ContentLength < 0 {
  1315  			return sendError(conn,
  1316  				errors.New("ContentLength from: "+request.ImageURL))
  1317  		}
  1318  		err = copyData(rootFilename, httpResponse.Body,
  1319  			uint64(httpResponse.ContentLength))
  1320  		if err != nil {
  1321  			return sendError(conn, err)
  1322  		}
  1323  	} else {
  1324  		return sendError(conn, errors.New("no image specified"))
  1325  	}
  1326  	vm.mutex.Lock()
  1327  	haveLock = true
  1328  	switch vm.State {
  1329  	case proto.StateStopped:
  1330  	case proto.StateRunning:
  1331  		if err := sendUpdate(conn, "stopping VM"); err != nil {
  1332  			return err
  1333  		}
  1334  		stoppedNotifier := make(chan struct{}, 1)
  1335  		vm.stoppedNotifier = stoppedNotifier
  1336  		vm.setState(proto.StateStopping)
  1337  		vm.commandInput <- "system_powerdown"
  1338  		time.AfterFunc(time.Second*15, vm.kill)
  1339  		vm.mutex.Unlock()
  1340  		<-stoppedNotifier
  1341  		vm.mutex.Lock()
  1342  		if vm.State != proto.StateStopped {
  1343  			return sendError(conn,
  1344  				errors.New("VM is not stopped after stop attempt"))
  1345  		}
  1346  	default:
  1347  		return errors.New("VM is not running or stopped")
  1348  	}
  1349  	vm.writeAndSendInfo()
  1350  	vm.setState(proto.StateStarting)
  1351  	vm.mutex.Unlock()
  1352  	haveLock = false
  1353  	sendUpdate(conn, "starting VM")
  1354  	_, err = vm.startManaging(0, false, false)
  1355  	if err != nil {
  1356  		sendError(conn, err)
  1357  	}
  1358  	response := proto.DebugVmImageResponse{
  1359  		Final: true,
  1360  	}
  1361  	if err := conn.Encode(response); err != nil {
  1362  		return err
  1363  	}
  1364  	doCleanup = false
  1365  	return nil
  1366  }
  1367  
  1368  func (m *Manager) deleteVmVolume(ipAddr net.IP, authInfo *srpc.AuthInformation,
  1369  	accessToken []byte, volumeIndex uint) error {
  1370  	if volumeIndex < 1 {
  1371  		return errors.New("cannot delete root volume")
  1372  	}
  1373  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, accessToken)
  1374  	if err != nil {
  1375  		return err
  1376  	}
  1377  	defer vm.mutex.Unlock()
  1378  	if volumeIndex >= uint(len(vm.VolumeLocations)) {
  1379  		return errors.New("volume index too large")
  1380  	}
  1381  	if vm.State != proto.StateStopped {
  1382  		return errors.New("VM is not stopped")
  1383  	}
  1384  	if err := os.Remove(vm.VolumeLocations[volumeIndex].Filename); err != nil {
  1385  		return err
  1386  	}
  1387  	os.Remove(vm.VolumeLocations[volumeIndex].DirectoryToCleanup)
  1388  	volumeLocations := make([]proto.LocalVolume, 0, len(vm.VolumeLocations)-1)
  1389  	volumes := make([]proto.Volume, 0, len(vm.VolumeLocations)-1)
  1390  	for index, volume := range vm.VolumeLocations {
  1391  		if uint(index) != volumeIndex {
  1392  			volumeLocations = append(volumeLocations, volume)
  1393  			volumes = append(volumes, vm.Volumes[index])
  1394  		}
  1395  	}
  1396  	vm.VolumeLocations = volumeLocations
  1397  	vm.Volumes = volumes
  1398  	vm.writeAndSendInfo()
  1399  	return nil
  1400  }
  1401  
  1402  func (m *Manager) destroyVm(ipAddr net.IP, authInfo *srpc.AuthInformation,
  1403  	accessToken []byte) error {
  1404  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, accessToken)
  1405  	if err != nil {
  1406  		return err
  1407  	}
  1408  	defer vm.mutex.Unlock()
  1409  	switch vm.State {
  1410  	case proto.StateStarting:
  1411  		return errors.New("VM is starting")
  1412  	case proto.StateRunning, proto.StateDebugging:
  1413  		if vm.DestroyProtection {
  1414  			return errors.New("cannot destroy running VM when protected")
  1415  		}
  1416  		vm.setState(proto.StateDestroying)
  1417  		vm.commandInput <- "quit"
  1418  	case proto.StateStopping:
  1419  		return errors.New("VM is stopping")
  1420  	case proto.StateStopped, proto.StateFailedToStart, proto.StateMigrating,
  1421  		proto.StateExporting, proto.StateCrashed:
  1422  		vm.delete()
  1423  	case proto.StateDestroying:
  1424  		return errors.New("VM is already destroying")
  1425  	default:
  1426  		return errors.New("unknown state: " + vm.State.String())
  1427  	}
  1428  	return nil
  1429  }
  1430  
  1431  func (m *Manager) discardVmAccessToken(ipAddr net.IP,
  1432  	authInfo *srpc.AuthInformation, accessToken []byte) error {
  1433  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, accessToken)
  1434  	if err != nil {
  1435  		return err
  1436  	}
  1437  	defer vm.mutex.Unlock()
  1438  	for index := range vm.accessToken { // Scrub token.
  1439  		vm.accessToken[index] = 0
  1440  	}
  1441  	vm.accessToken = nil
  1442  	return nil
  1443  }
  1444  
  1445  func (m *Manager) discardVmOldImage(ipAddr net.IP,
  1446  	authInfo *srpc.AuthInformation) error {
  1447  	extension := ".old"
  1448  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  1449  	if err != nil {
  1450  		return err
  1451  	}
  1452  	defer vm.mutex.Unlock()
  1453  	if err := removeFile(vm.getInitrdPath() + extension); err != nil {
  1454  		return err
  1455  	}
  1456  	if err := removeFile(vm.getKernelPath() + extension); err != nil {
  1457  		return err
  1458  	}
  1459  	return removeFile(vm.VolumeLocations[0].Filename + extension)
  1460  }
  1461  
  1462  func (m *Manager) discardVmOldUserData(ipAddr net.IP,
  1463  	authInfo *srpc.AuthInformation) error {
  1464  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  1465  	if err != nil {
  1466  		return err
  1467  	}
  1468  	defer vm.mutex.Unlock()
  1469  	return removeFile(filepath.Join(vm.dirname, UserDataFile+".old"))
  1470  }
  1471  
  1472  func (m *Manager) discardVmSnapshot(ipAddr net.IP,
  1473  	authInfo *srpc.AuthInformation) error {
  1474  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  1475  	if err != nil {
  1476  		return err
  1477  	}
  1478  	defer vm.mutex.Unlock()
  1479  	return vm.discardSnapshot()
  1480  }
  1481  
  1482  func (m *Manager) exportLocalVm(authInfo *srpc.AuthInformation,
  1483  	request proto.ExportLocalVmRequest) (*proto.ExportLocalVmInfo, error) {
  1484  	if !bytes.Equal(m.rootCookie, request.VerificationCookie) {
  1485  		return nil, fmt.Errorf("bad verification cookie: you are not root")
  1486  	}
  1487  	vm, err := m.getVmLockAndAuth(request.IpAddress, true, authInfo, nil)
  1488  	if err != nil {
  1489  		return nil, err
  1490  	}
  1491  	defer vm.mutex.Unlock()
  1492  	if vm.State != proto.StateStopped {
  1493  		return nil, errors.New("VM is not stopped")
  1494  	}
  1495  	bridges, _, err := vm.getBridgesAndOptions(false)
  1496  	if err != nil {
  1497  		return nil, err
  1498  	}
  1499  	vm.setState(proto.StateExporting)
  1500  	vmInfo := proto.ExportLocalVmInfo{
  1501  		Bridges:     bridges,
  1502  		LocalVmInfo: vm.LocalVmInfo,
  1503  	}
  1504  	return &vmInfo, nil
  1505  }
  1506  
  1507  func (m *Manager) getImage(searchName string, imageTimeout time.Duration) (
  1508  	*srpc.Client, *image.Image, string, error) {
  1509  	client, err := srpc.DialHTTP("tcp", m.ImageServerAddress, 0)
  1510  	if err != nil {
  1511  		return nil, nil, "",
  1512  			fmt.Errorf("error connecting to image server: %s: %s",
  1513  				m.ImageServerAddress, err)
  1514  	}
  1515  	doClose := true
  1516  	defer func() {
  1517  		if doClose {
  1518  			client.Close()
  1519  		}
  1520  	}()
  1521  	if isDir, err := imclient.CheckDirectory(client, searchName); err != nil {
  1522  		return nil, nil, "", err
  1523  	} else if isDir {
  1524  		imageName, err := imclient.FindLatestImage(client, searchName, false)
  1525  		if err != nil {
  1526  			return nil, nil, "", err
  1527  		}
  1528  		if imageName == "" {
  1529  			return nil, nil, "",
  1530  				errors.New("no images in directory: " + searchName)
  1531  		}
  1532  		img, err := imclient.GetImage(client, imageName)
  1533  		if err != nil {
  1534  			return nil, nil, "", err
  1535  		}
  1536  		img.FileSystem.RebuildInodePointers()
  1537  		doClose = false
  1538  		return client, img, imageName, nil
  1539  	}
  1540  	img, err := imclient.GetImageWithTimeout(client, searchName, imageTimeout)
  1541  	if err != nil {
  1542  		return nil, nil, "", err
  1543  	}
  1544  	if img == nil {
  1545  		return nil, nil, "", errors.New("timeout getting image")
  1546  	}
  1547  	if err := img.FileSystem.RebuildInodePointers(); err != nil {
  1548  		return nil, nil, "", err
  1549  	}
  1550  	doClose = false
  1551  	return client, img, searchName, nil
  1552  }
  1553  
  1554  func (m *Manager) getNumVMs() (uint, uint) {
  1555  	m.mutex.RLock()
  1556  	defer m.mutex.RUnlock()
  1557  	return m.getNumVMsWithLock()
  1558  }
  1559  
  1560  func (m *Manager) getNumVMsWithLock() (uint, uint) {
  1561  	var numRunning, numStopped uint
  1562  	for _, vm := range m.vms {
  1563  		if vm.State == proto.StateRunning {
  1564  			numRunning++
  1565  		} else {
  1566  			numStopped++
  1567  		}
  1568  	}
  1569  	return numRunning, numStopped
  1570  }
  1571  
  1572  func (m *Manager) getVmAccessToken(ipAddr net.IP,
  1573  	authInfo *srpc.AuthInformation, lifetime time.Duration) ([]byte, error) {
  1574  	if lifetime < time.Minute {
  1575  		return nil, errors.New("lifetime is less than 1 minute")
  1576  	}
  1577  	if lifetime > time.Hour*24 {
  1578  		return nil, errors.New("lifetime is greater than 1 day")
  1579  	}
  1580  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  1581  	if err != nil {
  1582  		return nil, err
  1583  	}
  1584  	defer vm.mutex.Unlock()
  1585  	if vm.accessToken != nil {
  1586  		return nil, errors.New("someone else has the access token")
  1587  	}
  1588  	vm.accessToken = nil
  1589  	token := make([]byte, 32)
  1590  	if _, err := rand.Read(token); err != nil {
  1591  		return nil, err
  1592  	}
  1593  	vm.accessToken = token
  1594  	cleanupNotifier := make(chan struct{}, 1)
  1595  	vm.accessTokenCleanupNotifier = cleanupNotifier
  1596  	go func() {
  1597  		timer := time.NewTimer(lifetime)
  1598  		select {
  1599  		case <-timer.C:
  1600  		case <-cleanupNotifier:
  1601  		}
  1602  		vm.mutex.Lock()
  1603  		defer vm.mutex.Unlock()
  1604  		for index := 0; index < len(vm.accessToken); index++ {
  1605  			vm.accessToken[index] = 0 // Scrub sensitive data.
  1606  		}
  1607  		vm.accessToken = nil
  1608  	}()
  1609  	return token, nil
  1610  }
  1611  
  1612  func (m *Manager) getVmAndLock(ipAddr net.IP, write bool) (*vmInfoType, error) {
  1613  	ipStr := ipAddr.String()
  1614  	m.mutex.RLock()
  1615  	if vm := m.vms[ipStr]; vm == nil {
  1616  		m.mutex.RUnlock()
  1617  		return nil, fmt.Errorf("no VM with IP address: %s found", ipStr)
  1618  	} else {
  1619  		if write {
  1620  			vm.mutex.Lock()
  1621  		} else {
  1622  			vm.mutex.RLock()
  1623  		}
  1624  		m.mutex.RUnlock()
  1625  		return vm, nil
  1626  	}
  1627  }
  1628  
  1629  func (m *Manager) getVmLockAndAuth(ipAddr net.IP, write bool,
  1630  	authInfo *srpc.AuthInformation, accessToken []byte) (*vmInfoType, error) {
  1631  	vm, err := m.getVmAndLock(ipAddr, write)
  1632  	if err != nil {
  1633  		return nil, err
  1634  	}
  1635  	if err := vm.checkAuth(authInfo, accessToken); err != nil {
  1636  		if write {
  1637  			vm.mutex.Unlock()
  1638  		} else {
  1639  			vm.mutex.RUnlock()
  1640  		}
  1641  		return nil, err
  1642  	}
  1643  	if write {
  1644  		if vm.blockMutations {
  1645  			vm.mutex.Unlock()
  1646  			return nil, errors.New("mutations blocked")
  1647  		}
  1648  	}
  1649  	return vm, nil
  1650  }
  1651  
  1652  func (m *Manager) getVmBootLog(ipAddr net.IP) (io.ReadCloser, error) {
  1653  	vm, err := m.getVmAndLock(ipAddr, false)
  1654  	if err != nil {
  1655  		return nil, err
  1656  	}
  1657  	filename := filepath.Join(vm.dirname, "bootlog")
  1658  	vm.mutex.RUnlock()
  1659  	return os.Open(filename)
  1660  }
  1661  
  1662  func (m *Manager) getVmFileReader(ipAddr net.IP, authInfo *srpc.AuthInformation,
  1663  	accessToken []byte, filename string) (io.ReadCloser, uint64, error) {
  1664  	filename = filepath.Clean(filename)
  1665  	vm, err := m.getVmLockAndAuth(ipAddr, false, authInfo, accessToken)
  1666  	if err != nil {
  1667  		return nil, 0, err
  1668  	}
  1669  	pathname := filepath.Join(vm.dirname, filename)
  1670  	vm.mutex.RUnlock()
  1671  	if file, err := os.Open(pathname); err != nil {
  1672  		return nil, 0, err
  1673  	} else if fi, err := file.Stat(); err != nil {
  1674  		return nil, 0, err
  1675  	} else {
  1676  		return file, uint64(fi.Size()), nil
  1677  	}
  1678  }
  1679  
  1680  func (m *Manager) getVmInfo(ipAddr net.IP) (proto.VmInfo, error) {
  1681  	vm, err := m.getVmAndLock(ipAddr, false)
  1682  	if err != nil {
  1683  		return proto.VmInfo{}, err
  1684  	}
  1685  	defer vm.mutex.RUnlock()
  1686  	return vm.VmInfo, nil
  1687  }
  1688  
  1689  func (m *Manager) getVmLastPatchLog(ipAddr net.IP) (
  1690  	io.ReadCloser, uint64, time.Time, error) {
  1691  	vm, err := m.getVmAndLock(ipAddr, false)
  1692  	if err != nil {
  1693  		return nil, 0, time.Time{}, err
  1694  	}
  1695  	defer vm.mutex.RUnlock()
  1696  	file, err := openBufferedFile(filepath.Join(
  1697  		vm.VolumeLocations[0].DirectoryToCleanup, lastPatchLogFilename))
  1698  	if err != nil {
  1699  		return nil, 0, time.Time{}, err
  1700  	}
  1701  	fi, err := file.Stat()
  1702  	if err != nil {
  1703  		file.Close()
  1704  		return nil, 0, time.Time{}, err
  1705  	}
  1706  	return file, uint64(fi.Size()), fi.ModTime(), nil
  1707  }
  1708  
  1709  func (m *Manager) getVmLockWatcher(ipAddr net.IP) (
  1710  	*lockwatcher.LockWatcher, error) {
  1711  	vm, err := m.getVmAndLock(ipAddr, false)
  1712  	if err != nil {
  1713  		return nil, err
  1714  	}
  1715  	defer vm.mutex.RUnlock()
  1716  	return vm.lockWatcher, nil
  1717  }
  1718  
  1719  func (m *Manager) getVmVolume(conn *srpc.Conn) error {
  1720  	var request proto.GetVmVolumeRequest
  1721  	if err := conn.Decode(&request); err != nil {
  1722  		return err
  1723  	}
  1724  	vm, err := m.getVmLockAndAuth(request.IpAddress, true,
  1725  		conn.GetAuthInformation(), request.AccessToken)
  1726  	if err != nil {
  1727  		return conn.Encode(proto.GetVmVolumeResponse{Error: err.Error()})
  1728  	}
  1729  	vm.blockMutations = true
  1730  	vm.mutex.Unlock()
  1731  	defer func() {
  1732  		vm.mutex.Lock()
  1733  		vm.allowMutationsAndUnlock()
  1734  	}()
  1735  	var initrd, kernel []byte
  1736  	if request.VolumeIndex == 0 {
  1737  		if initrdPath := vm.getActiveInitrdPath(); initrdPath != "" {
  1738  			if !request.GetExtraFiles && !request.IgnoreExtraFiles {
  1739  				return conn.Encode(proto.GetVmVolumeResponse{
  1740  					Error: "cannot get root volume with separate initrd"})
  1741  			}
  1742  			if request.GetExtraFiles {
  1743  				initrd, err = ioutil.ReadFile(initrdPath)
  1744  				if err != nil {
  1745  					return conn.Encode(
  1746  						proto.GetVmVolumeResponse{Error: err.Error()})
  1747  				}
  1748  			}
  1749  		}
  1750  		if kernelPath := vm.getActiveKernelPath(); kernelPath != "" {
  1751  			if !request.GetExtraFiles && !request.IgnoreExtraFiles {
  1752  				return conn.Encode(proto.GetVmVolumeResponse{
  1753  					Error: "cannot get root volume with separate kernel"})
  1754  			}
  1755  			if request.GetExtraFiles {
  1756  				kernel, err = ioutil.ReadFile(kernelPath)
  1757  				if err != nil {
  1758  					return conn.Encode(
  1759  						proto.GetVmVolumeResponse{Error: err.Error()})
  1760  				}
  1761  			}
  1762  		}
  1763  	}
  1764  	response := proto.GetVmVolumeResponse{}
  1765  	if len(initrd) > 0 || len(kernel) > 0 {
  1766  		response.ExtraFiles = make(map[string][]byte)
  1767  		response.ExtraFiles["initrd"] = initrd
  1768  		response.ExtraFiles["kernel"] = kernel
  1769  	}
  1770  	if request.VolumeIndex >= uint(len(vm.VolumeLocations)) {
  1771  		return conn.Encode(proto.GetVmVolumeResponse{
  1772  			Error: "index too large"})
  1773  	}
  1774  	file, err := os.Open(vm.VolumeLocations[request.VolumeIndex].Filename)
  1775  	if err != nil {
  1776  		return conn.Encode(proto.GetVmVolumeResponse{Error: err.Error()})
  1777  	}
  1778  	defer file.Close()
  1779  	if err := conn.Encode(response); err != nil {
  1780  		return err
  1781  	}
  1782  	if err := conn.Flush(); err != nil {
  1783  		return err
  1784  	}
  1785  	return rsync.ServeBlocks(conn, conn, conn, file,
  1786  		vm.Volumes[request.VolumeIndex].Size)
  1787  }
  1788  
  1789  func (m *Manager) holdVmLock(ipAddr net.IP, timeout time.Duration,
  1790  	writeLock bool, authInfo *srpc.AuthInformation) error {
  1791  	if timeout > time.Minute {
  1792  		return fmt.Errorf("timeout: %s exceeds one minute", timeout)
  1793  	}
  1794  	if authInfo == nil {
  1795  		return fmt.Errorf("no authentication information")
  1796  	}
  1797  	vm, err := m.getVmAndLock(ipAddr, writeLock)
  1798  	if err != nil {
  1799  		return err
  1800  	}
  1801  	if writeLock {
  1802  		vm.logger.Printf("HoldVmLock(%s) by %s for writing\n",
  1803  			format.Duration(timeout), authInfo.Username)
  1804  		time.Sleep(timeout)
  1805  		vm.mutex.Unlock()
  1806  	} else {
  1807  		vm.logger.Printf("HoldVmLock(%s) by %s for reading\n",
  1808  			format.Duration(timeout), authInfo.Username)
  1809  		time.Sleep(timeout)
  1810  		vm.mutex.RUnlock()
  1811  	}
  1812  	return nil
  1813  }
  1814  
  1815  func (m *Manager) importLocalVm(authInfo *srpc.AuthInformation,
  1816  	request proto.ImportLocalVmRequest) error {
  1817  	requestedIpAddrs := make(map[string]struct{},
  1818  		1+len(request.SecondaryAddresses))
  1819  	requestedMacAddrs := make(map[string]struct{},
  1820  		1+len(request.SecondaryAddresses))
  1821  	requestedIpAddrs[request.Address.IpAddress.String()] = struct{}{}
  1822  	requestedMacAddrs[request.Address.MacAddress] = struct{}{}
  1823  	for _, addr := range request.SecondaryAddresses {
  1824  		ipAddr := addr.IpAddress.String()
  1825  		if _, ok := requestedIpAddrs[ipAddr]; ok {
  1826  			return fmt.Errorf("duplicate address: %s", ipAddr)
  1827  		}
  1828  		requestedIpAddrs[ipAddr] = struct{}{}
  1829  		if _, ok := requestedMacAddrs[addr.MacAddress]; ok {
  1830  			return fmt.Errorf("duplicate address: %s", addr.MacAddress)
  1831  		}
  1832  		requestedIpAddrs[addr.MacAddress] = struct{}{}
  1833  	}
  1834  	if !bytes.Equal(m.rootCookie, request.VerificationCookie) {
  1835  		return fmt.Errorf("bad verification cookie: you are not root")
  1836  	}
  1837  	request.VmInfo.OwnerUsers = []string{authInfo.Username}
  1838  	request.VmInfo.Uncommitted = true
  1839  	volumeDirectories := stringutil.ConvertListToMap(m.volumeDirectories, false)
  1840  	volumes := make([]proto.Volume, 0, len(request.VolumeFilenames))
  1841  	for index, filename := range request.VolumeFilenames {
  1842  		dirname := filepath.Dir(filepath.Dir(filepath.Dir(filename)))
  1843  		if _, ok := volumeDirectories[dirname]; !ok {
  1844  			return fmt.Errorf("%s not in a volume directory", filename)
  1845  		}
  1846  		if fi, err := os.Lstat(filename); err != nil {
  1847  			return err
  1848  		} else if fi.Mode()&os.ModeType != 0 {
  1849  			return fmt.Errorf("%s is not a regular file", filename)
  1850  		} else {
  1851  			var volumeFormat proto.VolumeFormat
  1852  			if index < len(request.VmInfo.Volumes) {
  1853  				volumeFormat = request.VmInfo.Volumes[index].Format
  1854  			}
  1855  			volumes = append(volumes, proto.Volume{
  1856  				Size:   uint64(fi.Size()),
  1857  				Format: volumeFormat,
  1858  			})
  1859  		}
  1860  	}
  1861  	request.Volumes = volumes
  1862  	if !request.SkipMemoryCheck {
  1863  		err := <-tryAllocateMemory(getVmInfoMemoryInMiB(request.VmInfo))
  1864  		if err != nil {
  1865  			return err
  1866  		}
  1867  	}
  1868  	ipAddress := request.Address.IpAddress.String()
  1869  	vm := &vmInfoType{
  1870  		LocalVmInfo: proto.LocalVmInfo{
  1871  			VmInfo: request.VmInfo,
  1872  		},
  1873  		manager:          m,
  1874  		dirname:          filepath.Join(m.StateDir, "VMs", ipAddress),
  1875  		ipAddress:        ipAddress,
  1876  		ownerUsers:       map[string]struct{}{authInfo.Username: {}},
  1877  		logger:           prefixlogger.New(ipAddress+": ", m.Logger),
  1878  		metadataChannels: make(map[chan<- string]struct{}),
  1879  	}
  1880  	vm.VmInfo.State = proto.StateStarting
  1881  	m.mutex.Lock()
  1882  	defer m.mutex.Unlock()
  1883  	if _, ok := m.vms[ipAddress]; ok {
  1884  		return fmt.Errorf("%s already exists", ipAddress)
  1885  	}
  1886  	for _, poolAddress := range m.addressPool.Registered {
  1887  		ipAddr := poolAddress.IpAddress.String()
  1888  		if _, ok := requestedIpAddrs[ipAddr]; ok {
  1889  			return fmt.Errorf("%s is in address pool", ipAddr)
  1890  		}
  1891  		if _, ok := requestedMacAddrs[poolAddress.MacAddress]; ok {
  1892  			return fmt.Errorf("%s is in address pool", poolAddress.MacAddress)
  1893  		}
  1894  	}
  1895  	subnetId := m.getMatchingSubnet(request.Address.IpAddress)
  1896  	if subnetId == "" {
  1897  		return fmt.Errorf("no matching subnet for: %s\n", ipAddress)
  1898  	}
  1899  	vm.VmInfo.SubnetId = subnetId
  1900  	vm.VmInfo.SecondarySubnetIDs = nil
  1901  	for _, addr := range request.SecondaryAddresses {
  1902  		subnetId := m.getMatchingSubnet(addr.IpAddress)
  1903  		if subnetId == "" {
  1904  			return fmt.Errorf("no matching subnet for: %s\n", addr.IpAddress)
  1905  		}
  1906  		vm.VmInfo.SecondarySubnetIDs = append(vm.VmInfo.SecondarySubnetIDs,
  1907  			subnetId)
  1908  	}
  1909  	defer func() {
  1910  		if vm == nil {
  1911  			return
  1912  		}
  1913  		delete(m.vms, vm.ipAddress)
  1914  		m.sendVmInfo(vm.ipAddress, nil)
  1915  		os.RemoveAll(vm.dirname)
  1916  		for _, volume := range vm.VolumeLocations {
  1917  			os.RemoveAll(volume.DirectoryToCleanup)
  1918  		}
  1919  	}()
  1920  	if err := os.MkdirAll(vm.dirname, fsutil.DirPerms); err != nil {
  1921  		return err
  1922  	}
  1923  	for index, sourceFilename := range request.VolumeFilenames {
  1924  		dirname := filepath.Join(filepath.Dir(filepath.Dir(
  1925  			filepath.Dir(sourceFilename))),
  1926  			ipAddress)
  1927  		if err := os.MkdirAll(dirname, fsutil.DirPerms); err != nil {
  1928  			return err
  1929  		}
  1930  		destFilename := filepath.Join(dirname, indexToName(index))
  1931  		if err := os.Link(sourceFilename, destFilename); err != nil {
  1932  			return err
  1933  		}
  1934  		vm.VolumeLocations = append(vm.VolumeLocations, proto.LocalVolume{
  1935  			dirname, destFilename})
  1936  	}
  1937  	m.vms[ipAddress] = vm
  1938  	if _, err := vm.startManaging(0, false, true); err != nil {
  1939  		return err
  1940  	}
  1941  	vm.setupLockWatcher()
  1942  	vm = nil // Cancel cleanup.
  1943  	return nil
  1944  }
  1945  
  1946  func (m *Manager) listVMs(request proto.ListVMsRequest) []string {
  1947  	ownerGroups := stringutil.ConvertListToMap(request.OwnerGroups, false)
  1948  	vmTagMatcher := tagmatcher.New(request.VmTagsToMatch, false)
  1949  	m.mutex.RLock()
  1950  	ipAddrs := make([]string, 0, len(m.vms))
  1951  	for ipAddr, vm := range m.vms {
  1952  		if request.IgnoreStateMask&(1<<vm.State) != 0 {
  1953  			continue
  1954  		}
  1955  		if !vmTagMatcher.MatchEach(vm.Tags) {
  1956  			continue
  1957  		}
  1958  		include := true
  1959  		if len(ownerGroups) > 0 {
  1960  			include = false
  1961  			for _, ownerGroup := range vm.OwnerGroups {
  1962  				if _, ok := ownerGroups[ownerGroup]; ok {
  1963  					include = true
  1964  					break
  1965  				}
  1966  			}
  1967  		}
  1968  		if len(request.OwnerUsers) > 0 {
  1969  			include = false
  1970  			for _, ownerUser := range request.OwnerUsers {
  1971  				if _, ok := vm.ownerUsers[ownerUser]; ok {
  1972  					include = true
  1973  					break
  1974  				}
  1975  			}
  1976  		}
  1977  		if include {
  1978  			ipAddrs = append(ipAddrs, ipAddr)
  1979  		}
  1980  	}
  1981  	m.mutex.RUnlock()
  1982  	if request.Sort {
  1983  		verstr.Sort(ipAddrs)
  1984  	}
  1985  	return ipAddrs
  1986  }
  1987  
  1988  func (m *Manager) migrateVm(conn *srpc.Conn) error {
  1989  	var request proto.MigrateVmRequest
  1990  	if err := conn.Decode(&request); err != nil {
  1991  		return err
  1992  	}
  1993  	hypervisor, err := srpc.DialHTTP("tcp", request.SourceHypervisor, 0)
  1994  	if err != nil {
  1995  		return err
  1996  	}
  1997  	defer hypervisor.Close()
  1998  	defer func() {
  1999  		req := proto.DiscardVmAccessTokenRequest{
  2000  			AccessToken: request.AccessToken,
  2001  			IpAddress:   request.IpAddress}
  2002  		var reply proto.DiscardVmAccessTokenResponse
  2003  		hypervisor.RequestReply("Hypervisor.DiscardVmAccessToken",
  2004  			req, &reply)
  2005  	}()
  2006  	ipAddress := request.IpAddress.String()
  2007  	m.mutex.RLock()
  2008  	_, ok := m.vms[ipAddress]
  2009  	subnetId := m.getMatchingSubnet(request.IpAddress)
  2010  	m.mutex.RUnlock()
  2011  	if ok {
  2012  		return errors.New("cannot migrate to the same hypervisor")
  2013  	}
  2014  	if subnetId == "" {
  2015  		return fmt.Errorf("no matching subnet for: %s\n", request.IpAddress)
  2016  	}
  2017  	getInfoRequest := proto.GetVmInfoRequest{request.IpAddress}
  2018  	var getInfoReply proto.GetVmInfoResponse
  2019  	err = hypervisor.RequestReply("Hypervisor.GetVmInfo", getInfoRequest,
  2020  		&getInfoReply)
  2021  	if err != nil {
  2022  		return err
  2023  	}
  2024  	accessToken := request.AccessToken
  2025  	vmInfo := getInfoReply.VmInfo
  2026  	if subnetId != vmInfo.SubnetId {
  2027  		return fmt.Errorf("subnet ID changing from: %s to: %s",
  2028  			vmInfo.SubnetId, subnetId)
  2029  	}
  2030  	if !request.IpAddress.Equal(vmInfo.Address.IpAddress) {
  2031  		return fmt.Errorf("inconsistent IP address: %s",
  2032  			vmInfo.Address.IpAddress)
  2033  	}
  2034  	if err := m.migrateVmChecks(vmInfo, request.SkipMemoryCheck); err != nil {
  2035  		return err
  2036  	}
  2037  	volumeDirectories, err := m.getVolumeDirectories(vmInfo.Volumes[0].Size,
  2038  		vmInfo.Volumes[0].Type, vmInfo.Volumes[1:], vmInfo.SpreadVolumes)
  2039  	if err != nil {
  2040  		return err
  2041  	}
  2042  	vm := &vmInfoType{
  2043  		LocalVmInfo: proto.LocalVmInfo{
  2044  			VmInfo: vmInfo,
  2045  			VolumeLocations: make([]proto.LocalVolume, 0,
  2046  				len(volumeDirectories)),
  2047  		},
  2048  		manager:          m,
  2049  		dirname:          filepath.Join(m.StateDir, "VMs", ipAddress),
  2050  		doNotWriteOrSend: true,
  2051  		ipAddress:        ipAddress,
  2052  		logger:           prefixlogger.New(ipAddress+": ", m.Logger),
  2053  		metadataChannels: make(map[chan<- string]struct{}),
  2054  	}
  2055  	vm.Uncommitted = true
  2056  	defer func() { // Evaluate vm at return time, not defer time.
  2057  		vm.cleanup()
  2058  		hyperclient.PrepareVmForMigration(hypervisor, request.IpAddress,
  2059  			accessToken, false)
  2060  		if vmInfo.State == proto.StateRunning {
  2061  			hyperclient.StartVm(hypervisor, request.IpAddress, accessToken)
  2062  		}
  2063  	}()
  2064  	vm.ownerUsers = stringutil.ConvertListToMap(vm.OwnerUsers, false)
  2065  	if err := os.MkdirAll(vm.dirname, fsutil.DirPerms); err != nil {
  2066  		return err
  2067  	}
  2068  	for index, _dirname := range volumeDirectories {
  2069  		dirname := filepath.Join(_dirname, ipAddress)
  2070  		if err := os.MkdirAll(dirname, fsutil.DirPerms); err != nil {
  2071  			return err
  2072  		}
  2073  		vm.VolumeLocations = append(vm.VolumeLocations, proto.LocalVolume{
  2074  			DirectoryToCleanup: dirname,
  2075  			Filename:           filepath.Join(dirname, indexToName(index)),
  2076  		})
  2077  	}
  2078  	if vmInfo.State == proto.StateStopped {
  2079  		err := hyperclient.PrepareVmForMigration(hypervisor, request.IpAddress,
  2080  			request.AccessToken, true)
  2081  		if err != nil {
  2082  			return err
  2083  		}
  2084  	}
  2085  	// Begin copying over the volumes.
  2086  	err = sendVmMigrationMessage(conn, "initial volume(s) copy")
  2087  	if err != nil {
  2088  		return err
  2089  	}
  2090  	err = vm.migrateVmVolumes(hypervisor, vm.Address.IpAddress, accessToken,
  2091  		true)
  2092  	if err != nil {
  2093  		return err
  2094  	}
  2095  	if vmInfo.State != proto.StateStopped {
  2096  		err = sendVmMigrationMessage(conn, "stopping VM")
  2097  		if err != nil {
  2098  			return err
  2099  		}
  2100  		err := hyperclient.StopVm(hypervisor, request.IpAddress,
  2101  			request.AccessToken)
  2102  		if err != nil {
  2103  			return err
  2104  		}
  2105  		err = hyperclient.PrepareVmForMigration(hypervisor, request.IpAddress,
  2106  			request.AccessToken, true)
  2107  		if err != nil {
  2108  			return err
  2109  		}
  2110  		err = sendVmMigrationMessage(conn, "update volume(s)")
  2111  		if err != nil {
  2112  			return err
  2113  		}
  2114  		err = vm.migrateVmVolumes(hypervisor, vm.Address.IpAddress, accessToken,
  2115  			false)
  2116  		if err != nil {
  2117  			return err
  2118  		}
  2119  	}
  2120  	err = migratevmUserData(hypervisor,
  2121  		filepath.Join(vm.dirname, UserDataFile),
  2122  		request.IpAddress, accessToken)
  2123  	if err != nil {
  2124  		return err
  2125  	}
  2126  	if err := sendVmMigrationMessage(conn, "starting VM"); err != nil {
  2127  		return err
  2128  	}
  2129  	vm.State = proto.StateStarting
  2130  	m.mutex.Lock()
  2131  	m.vms[ipAddress] = vm
  2132  	m.mutex.Unlock()
  2133  	dhcpTimedOut, err := vm.startManaging(request.DhcpTimeout, false, false)
  2134  	if err != nil {
  2135  		return err
  2136  	}
  2137  	if dhcpTimedOut {
  2138  		return fmt.Errorf("DHCP timed out")
  2139  	}
  2140  	err = conn.Encode(proto.MigrateVmResponse{RequestCommit: true})
  2141  	if err != nil {
  2142  		return err
  2143  	}
  2144  	if err := conn.Flush(); err != nil {
  2145  		return err
  2146  	}
  2147  	var reply proto.MigrateVmResponseResponse
  2148  	if err := conn.Decode(&reply); err != nil {
  2149  		return err
  2150  	}
  2151  	if !reply.Commit {
  2152  		return fmt.Errorf("VM migration abandoned")
  2153  	}
  2154  	if err := m.registerAddress(vm.Address); err != nil {
  2155  		return err
  2156  	}
  2157  	for _, address := range vm.SecondaryAddresses {
  2158  		if err := m.registerAddress(address); err != nil {
  2159  			return err
  2160  		}
  2161  	}
  2162  	vm.doNotWriteOrSend = false
  2163  	vm.Uncommitted = false
  2164  	vm.writeAndSendInfo()
  2165  	err = hyperclient.DestroyVm(hypervisor, request.IpAddress, accessToken)
  2166  	if err != nil {
  2167  		m.Logger.Printf("error cleaning up old migrated VM: %s\n", ipAddress)
  2168  	}
  2169  	vm.setupLockWatcher()
  2170  	vm = nil // Cancel cleanup.
  2171  	return nil
  2172  }
  2173  
  2174  func sendVmCopyMessage(conn *srpc.Conn, message string) error {
  2175  	request := proto.CopyVmResponse{ProgressMessage: message}
  2176  	if err := conn.Encode(request); err != nil {
  2177  		return err
  2178  	}
  2179  	return conn.Flush()
  2180  }
  2181  
  2182  func sendVmMigrationMessage(conn *srpc.Conn, message string) error {
  2183  	request := proto.MigrateVmResponse{ProgressMessage: message}
  2184  	if err := conn.Encode(request); err != nil {
  2185  		return err
  2186  	}
  2187  	return conn.Flush()
  2188  }
  2189  
  2190  func sendVmPatchImageMessage(conn *srpc.Conn, message string) error {
  2191  	request := proto.PatchVmImageResponse{ProgressMessage: message}
  2192  	if err := conn.Encode(request); err != nil {
  2193  		return err
  2194  	}
  2195  	return conn.Flush()
  2196  }
  2197  
  2198  func (m *Manager) migrateVmChecks(vmInfo proto.VmInfo,
  2199  	skipMemoryCheck bool) error {
  2200  	switch vmInfo.State {
  2201  	case proto.StateStopped:
  2202  	case proto.StateRunning:
  2203  	default:
  2204  		return fmt.Errorf("VM state: %s is not stopped/running", vmInfo.State)
  2205  	}
  2206  	m.mutex.RLock()
  2207  	defer m.mutex.RUnlock()
  2208  	for index, address := range vmInfo.SecondaryAddresses {
  2209  		subnetId := m.getMatchingSubnet(address.IpAddress)
  2210  		if subnetId == "" {
  2211  			return fmt.Errorf("no matching subnet for: %s\n", address.IpAddress)
  2212  		}
  2213  		if subnetId != vmInfo.SecondarySubnetIDs[index] {
  2214  			return fmt.Errorf("subnet ID changing from: %s to: %s",
  2215  				vmInfo.SecondarySubnetIDs[index], subnetId)
  2216  		}
  2217  	}
  2218  	if err := m.checkSufficientCPUWithLock(vmInfo.MilliCPUs); err != nil {
  2219  		return err
  2220  	}
  2221  	err := m.checkSufficientMemoryWithLock(vmInfo.MemoryInMiB, nil)
  2222  	if err != nil {
  2223  		return err
  2224  	}
  2225  	if !skipMemoryCheck {
  2226  		err := <-tryAllocateMemory(getVmInfoMemoryInMiB(vmInfo))
  2227  		if err != nil {
  2228  			return err
  2229  		}
  2230  	}
  2231  	return nil
  2232  }
  2233  
  2234  func migratevmUserData(hypervisor *srpc.Client, filename string,
  2235  	ipAddr net.IP, accessToken []byte) error {
  2236  	conn, err := hypervisor.Call("Hypervisor.GetVmUserData")
  2237  	if err != nil {
  2238  		return err
  2239  	}
  2240  	defer conn.Close()
  2241  	request := proto.GetVmUserDataRequest{
  2242  		AccessToken: accessToken,
  2243  		IpAddress:   ipAddr,
  2244  	}
  2245  	if err := conn.Encode(request); err != nil {
  2246  		return fmt.Errorf("error encoding request: %s", err)
  2247  	}
  2248  	if err := conn.Flush(); err != nil {
  2249  		return err
  2250  	}
  2251  	var reply proto.GetVmUserDataResponse
  2252  	if err := conn.Decode(&reply); err != nil {
  2253  		return err
  2254  	}
  2255  	if err := errors.New(reply.Error); err != nil {
  2256  		return err
  2257  	}
  2258  	if reply.Length < 1 {
  2259  		return nil
  2260  	}
  2261  	writer, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL,
  2262  		fsutil.PrivateFilePerms)
  2263  	if err != nil {
  2264  		io.CopyN(ioutil.Discard, conn, int64(reply.Length))
  2265  		return err
  2266  	}
  2267  	defer writer.Close()
  2268  	if _, err := io.CopyN(writer, conn, int64(reply.Length)); err != nil {
  2269  		return err
  2270  	}
  2271  	return nil
  2272  }
  2273  
  2274  func (vm *vmInfoType) makeExtraLogger(filename string) (
  2275  	*filelogger.Logger, error) {
  2276  	debugLevel := int16(-1)
  2277  	if levelGetter, ok := vm.logger.(log.DebugLogLevelGetter); ok {
  2278  		debugLevel = levelGetter.GetLevel()
  2279  	}
  2280  	return filelogger.New(filepath.Join(
  2281  		vm.VolumeLocations[0].DirectoryToCleanup, filename),
  2282  		filelogger.Options{
  2283  			Flags:      serverlogger.GetStandardFlags(),
  2284  			DebugLevel: debugLevel,
  2285  		})
  2286  }
  2287  
  2288  func (vm *vmInfoType) migrateVmVolumes(hypervisor *srpc.Client,
  2289  	sourceIpAddr net.IP, accessToken []byte, getExtraFiles bool) error {
  2290  	for index, volume := range vm.VolumeLocations {
  2291  		_, err := migrateVmVolume(hypervisor, volume.DirectoryToCleanup,
  2292  			volume.Filename, uint(index), vm.Volumes[index].Size, sourceIpAddr,
  2293  			accessToken, getExtraFiles)
  2294  		if err != nil {
  2295  			return err
  2296  		}
  2297  	}
  2298  	return nil
  2299  }
  2300  
  2301  func migrateVmVolume(hypervisor *srpc.Client, directory, filename string,
  2302  	volumeIndex uint, size uint64, ipAddr net.IP, accessToken []byte,
  2303  	getExtraFiles bool) (
  2304  	*rsync.Stats, error) {
  2305  	var initialFileSize uint64
  2306  	reader, err := os.OpenFile(filename, os.O_RDONLY, 0)
  2307  	if err != nil {
  2308  		if !os.IsNotExist(err) {
  2309  			return nil, err
  2310  		}
  2311  	} else {
  2312  		defer reader.Close()
  2313  		if fi, err := reader.Stat(); err != nil {
  2314  			return nil, err
  2315  		} else {
  2316  			initialFileSize = uint64(fi.Size())
  2317  			if initialFileSize > size {
  2318  				return nil, errors.New("file larger than volume")
  2319  			}
  2320  		}
  2321  	}
  2322  	writer, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE,
  2323  		fsutil.PrivateFilePerms)
  2324  	if err != nil {
  2325  		return nil, err
  2326  	}
  2327  	defer writer.Close()
  2328  	request := proto.GetVmVolumeRequest{
  2329  		AccessToken:      accessToken,
  2330  		GetExtraFiles:    getExtraFiles,
  2331  		IgnoreExtraFiles: !getExtraFiles,
  2332  		IpAddress:        ipAddr,
  2333  		VolumeIndex:      volumeIndex,
  2334  	}
  2335  	conn, err := hypervisor.Call("Hypervisor.GetVmVolume")
  2336  	if err != nil {
  2337  		if reader == nil {
  2338  			os.Remove(filename)
  2339  		}
  2340  		return nil, err
  2341  	}
  2342  	defer conn.Close()
  2343  	if err := conn.Encode(request); err != nil {
  2344  		return nil, fmt.Errorf("error encoding request: %s", err)
  2345  	}
  2346  	if err := conn.Flush(); err != nil {
  2347  		return nil, err
  2348  	}
  2349  	var response proto.GetVmVolumeResponse
  2350  	if err := conn.Decode(&response); err != nil {
  2351  		return nil, err
  2352  	}
  2353  	if err := errors.New(response.Error); err != nil {
  2354  		return nil, err
  2355  	}
  2356  	stats, err := rsync.GetBlocks(conn, conn, conn, reader, writer, size,
  2357  		initialFileSize)
  2358  	if err != nil {
  2359  		return nil, err
  2360  	}
  2361  	if !getExtraFiles {
  2362  		return &stats, nil
  2363  	}
  2364  	for name, data := range response.ExtraFiles {
  2365  		if name != "initrd" && name != "kernel" {
  2366  			return nil, fmt.Errorf("received unsupported extra file: %s", name)
  2367  		}
  2368  		err := ioutil.WriteFile(filepath.Join(directory, name), data,
  2369  			fsutil.PrivateFilePerms)
  2370  		if err != nil {
  2371  			return nil, err
  2372  		}
  2373  	}
  2374  	return &stats, nil
  2375  }
  2376  
  2377  func (m *Manager) notifyVmMetadataRequest(ipAddr net.IP, path string) {
  2378  	addr := ipAddr.String()
  2379  	m.mutex.RLock()
  2380  	vm, ok := m.vms[addr]
  2381  	m.mutex.RUnlock()
  2382  	if !ok {
  2383  		return
  2384  	}
  2385  	vm.mutex.Lock()
  2386  	defer vm.mutex.Unlock()
  2387  	for ch := range vm.metadataChannels {
  2388  		select {
  2389  		case ch <- path:
  2390  		default:
  2391  		}
  2392  	}
  2393  }
  2394  
  2395  func (m *Manager) patchVmImage(conn *srpc.Conn,
  2396  	request proto.PatchVmImageRequest) error {
  2397  	client, img, imageName, err := m.getImage(request.ImageName,
  2398  		request.ImageTimeout)
  2399  	if err != nil {
  2400  		return err
  2401  	}
  2402  	if img.Filter == nil {
  2403  		return fmt.Errorf("%s contains no filter", imageName)
  2404  	}
  2405  	img.FileSystem.InodeToFilenamesTable()
  2406  	img.FileSystem.FilenameToInodeTable()
  2407  	hashToInodesTable := img.FileSystem.HashToInodesTable()
  2408  	img.FileSystem.BuildEntryMap()
  2409  	var objectsGetter objectserver.ObjectsGetter
  2410  	vm, err := m.getVmLockAndAuth(request.IpAddress, true,
  2411  		conn.GetAuthInformation(), nil)
  2412  	if err != nil {
  2413  		return err
  2414  	}
  2415  	vm.blockMutations = true
  2416  	haveLock := true
  2417  	defer func() {
  2418  		if !haveLock {
  2419  			vm.mutex.Lock()
  2420  		}
  2421  		vm.allowMutationsAndUnlock()
  2422  	}()
  2423  	restart := vm.State == proto.StateRunning
  2424  	switch vm.State {
  2425  	case proto.StateStopped:
  2426  	case proto.StateRunning:
  2427  		if len(vm.Address.IpAddress) < 1 {
  2428  			return errors.New("cannot stop VM with externally managed lease")
  2429  		}
  2430  	default:
  2431  		return errors.New("VM is not running or stopped")
  2432  	}
  2433  	vm.mutex.Unlock()
  2434  	haveLock = false
  2435  	if m.objectCache == nil {
  2436  		objectClient := objclient.AttachObjectClient(client)
  2437  		defer objectClient.Close()
  2438  		objectsGetter = objectClient
  2439  	} else if restart {
  2440  		hashes := make([]hash.Hash, 0, len(hashToInodesTable))
  2441  		for hashVal := range hashToInodesTable {
  2442  			hashes = append(hashes, hashVal)
  2443  		}
  2444  		if err := sendVmPatchImageMessage(conn, "prefetching"); err != nil {
  2445  			return err
  2446  		}
  2447  		if err := m.objectCache.FetchObjects(hashes); err != nil {
  2448  			return err
  2449  		}
  2450  		objectsGetter = m.objectCache
  2451  	} else {
  2452  		objectsGetter = m.objectCache
  2453  	}
  2454  	bootInfo, err := util.GetBootInfo(img.FileSystem, vm.rootLabel(false),
  2455  		"net.ifnames=0")
  2456  	if err != nil {
  2457  		return err
  2458  	}
  2459  	vm.mutex.Lock()
  2460  	haveLock = true
  2461  	switch vm.State {
  2462  	case proto.StateStopped:
  2463  	case proto.StateRunning:
  2464  		if err := sendVmPatchImageMessage(conn, "stopping VM"); err != nil {
  2465  			return err
  2466  		}
  2467  		stoppedNotifier := make(chan struct{}, 1)
  2468  		vm.stoppedNotifier = stoppedNotifier
  2469  		vm.setState(proto.StateStopping)
  2470  		vm.commandInput <- "system_powerdown"
  2471  		time.AfterFunc(time.Second*15, vm.kill)
  2472  		vm.mutex.Unlock()
  2473  		<-stoppedNotifier
  2474  		vm.mutex.Lock()
  2475  		if vm.State != proto.StateStopped {
  2476  			return errors.New("VM is not stopped after stop attempt")
  2477  		}
  2478  	default:
  2479  		return errors.New("VM is not running or stopped")
  2480  	}
  2481  	vm.mutex.Unlock()
  2482  	haveLock = false
  2483  	rootFilename := vm.VolumeLocations[0].Filename
  2484  	tmpRootFilename := rootFilename + ".new"
  2485  	if request.SkipBackup {
  2486  		if err := os.Link(rootFilename, tmpRootFilename); err != nil {
  2487  			return err
  2488  		}
  2489  	} else {
  2490  		if err := sendVmPatchImageMessage(conn, "copying root"); err != nil {
  2491  			return err
  2492  		}
  2493  		err = fsutil.CopyFile(tmpRootFilename, rootFilename,
  2494  			fsutil.PrivateFilePerms)
  2495  		if err != nil {
  2496  			return err
  2497  		}
  2498  	}
  2499  	defer os.Remove(tmpRootFilename)
  2500  	rootDir, err := ioutil.TempDir(vm.dirname, "root")
  2501  	if err != nil {
  2502  		return err
  2503  	}
  2504  	defer os.Remove(rootDir)
  2505  	partition := "p1"
  2506  	loopDevice, err := fsutil.LoopbackSetupAndWaitForPartition(tmpRootFilename,
  2507  		partition, time.Minute, vm.logger)
  2508  	if err != nil {
  2509  		return err
  2510  	}
  2511  	defer fsutil.LoopbackDeleteAndWaitForPartition(loopDevice, partition,
  2512  		time.Minute, vm.logger)
  2513  	vm.logger.Debugf(0, "mounting: %s onto: %s\n", loopDevice, rootDir)
  2514  	err = wsyscall.Mount(loopDevice+partition, rootDir, "ext4", 0, "")
  2515  	if err != nil {
  2516  		return err
  2517  	}
  2518  	defer syscall.Unmount(rootDir, 0)
  2519  	if err := sendVmPatchImageMessage(conn, "scanning root"); err != nil {
  2520  		return err
  2521  	}
  2522  	fs, err := scanner.ScanFileSystem(rootDir, nil, img.Filter, nil, nil, nil)
  2523  	if err != nil {
  2524  		return err
  2525  	}
  2526  	if err := fs.FileSystem.RebuildInodePointers(); err != nil {
  2527  		return err
  2528  	}
  2529  	fs.FileSystem.BuildEntryMap()
  2530  	initrdFilename := vm.getInitrdPath()
  2531  	tmpInitrdFilename := initrdFilename + ".new"
  2532  	defer os.Remove(tmpInitrdFilename)
  2533  	kernelFilename := vm.getKernelPath()
  2534  	tmpKernelFilename := kernelFilename + ".new"
  2535  	defer os.Remove(tmpKernelFilename)
  2536  	writeBootloaderConfig := false
  2537  	if _, err := os.Stat(vm.getKernelPath()); err == nil { // No bootloader.
  2538  		err := extractKernel(vm.VolumeLocations[0], ".new", objectsGetter,
  2539  			img.FileSystem, bootInfo)
  2540  		if err != nil {
  2541  			return err
  2542  		}
  2543  	} else { // Requires a bootloader.
  2544  		writeBootloaderConfig = true
  2545  	}
  2546  	patchLogger, err := vm.makeExtraLogger(lastPatchLogFilename)
  2547  	if err != nil {
  2548  		return err
  2549  	}
  2550  	defer patchLogger.Close()
  2551  	subObj := domlib.Sub{FileSystem: &fs.FileSystem}
  2552  	fetchMap, _ := domlib.BuildMissingLists(subObj, img, false, true,
  2553  		patchLogger)
  2554  	objectsToFetch := objectcache.ObjectMapToCache(fetchMap)
  2555  	objectsDir := filepath.Join(rootDir, ".subd", "objects")
  2556  	defer os.RemoveAll(objectsDir)
  2557  	startTime := time.Now()
  2558  	objectsReader, err := objectsGetter.GetObjects(objectsToFetch)
  2559  	if err != nil {
  2560  		return err
  2561  	}
  2562  	defer objectsReader.Close()
  2563  	err = sendVmPatchImageMessage(conn, "pre-deleting unneeded files")
  2564  	if err != nil {
  2565  		return err
  2566  	}
  2567  	err = deleteFilesNotInImage(img.FileSystem, &fs.FileSystem, rootDir,
  2568  		patchLogger)
  2569  	if err != nil {
  2570  		return err
  2571  	}
  2572  	msg := fmt.Sprintf("fetching(%s) %d objects",
  2573  		imageName, len(objectsToFetch))
  2574  	if err := sendVmPatchImageMessage(conn, msg); err != nil {
  2575  		return err
  2576  	}
  2577  	vm.logger.Debugln(0, msg)
  2578  	for _, hashVal := range objectsToFetch {
  2579  		length, reader, err := objectsReader.NextObject()
  2580  		if err != nil {
  2581  			vm.logger.Println(err)
  2582  			return err
  2583  		}
  2584  		err = readOne(objectsDir, hashVal, length, reader)
  2585  		reader.Close()
  2586  		if err != nil {
  2587  			vm.logger.Println(err)
  2588  			return err
  2589  		}
  2590  	}
  2591  	msg = fmt.Sprintf("fetched(%s) %d objects in %s",
  2592  		imageName, len(objectsToFetch), format.Duration(time.Since(startTime)))
  2593  	if err := sendVmPatchImageMessage(conn, msg); err != nil {
  2594  		return err
  2595  	}
  2596  	vm.logger.Debugln(0, msg)
  2597  	subObj.ObjectCache = append(subObj.ObjectCache, objectsToFetch...)
  2598  	var subRequest subproto.UpdateRequest
  2599  	if domlib.BuildUpdateRequest(subObj, img, &subRequest, false, true,
  2600  		patchLogger) {
  2601  		return errors.New("failed building update: missing computed files")
  2602  	}
  2603  	subRequest.ImageName = imageName
  2604  	subRequest.Triggers = nil
  2605  	if err := sendVmPatchImageMessage(conn, "starting update"); err != nil {
  2606  		return err
  2607  	}
  2608  	vm.logger.Debugf(0, "update(%s) starting\n", imageName)
  2609  	patchLogger.Printf("update(%s) starting\n", imageName)
  2610  	startTime = time.Now()
  2611  	_, _, err = sublib.Update(subRequest, rootDir, objectsDir, nil, nil, nil,
  2612  		patchLogger)
  2613  	if err != nil {
  2614  		return err
  2615  	}
  2616  	msg = fmt.Sprintf("updated(%s) in %s",
  2617  		imageName, format.Duration(time.Since(startTime)))
  2618  	if err := sendVmPatchImageMessage(conn, msg); err != nil {
  2619  		return err
  2620  	}
  2621  	if writeBootloaderConfig {
  2622  		err := bootInfo.WriteBootloaderConfig(rootDir, vm.logger)
  2623  		if err != nil {
  2624  			return err
  2625  		}
  2626  	}
  2627  	if !request.SkipBackup {
  2628  		oldRootFilename := rootFilename + ".old"
  2629  		if err := os.Rename(rootFilename, oldRootFilename); err != nil {
  2630  			return err
  2631  		}
  2632  		if err := os.Rename(tmpRootFilename, rootFilename); err != nil {
  2633  			os.Rename(oldRootFilename, rootFilename)
  2634  			return err
  2635  		}
  2636  		os.Rename(initrdFilename, initrdFilename+".old")
  2637  		os.Rename(kernelFilename, kernelFilename+".old")
  2638  	}
  2639  	os.Rename(tmpInitrdFilename, initrdFilename)
  2640  	os.Rename(tmpKernelFilename, kernelFilename)
  2641  	vm.mutex.Lock()
  2642  	haveLock = true
  2643  	vm.ImageName = imageName
  2644  	vm.writeAndSendInfo()
  2645  	if restart && vm.State == proto.StateStopped {
  2646  		vm.setState(proto.StateStarting)
  2647  		sendVmPatchImageMessage(conn, "starting VM")
  2648  		vm.mutex.Unlock()
  2649  		_, err := vm.startManaging(0, false, false)
  2650  		vm.mutex.Lock()
  2651  		if err != nil {
  2652  			return err
  2653  		}
  2654  	}
  2655  	return nil
  2656  }
  2657  
  2658  func (m *Manager) prepareVmForMigration(ipAddr net.IP,
  2659  	authInfoP *srpc.AuthInformation, accessToken []byte, enable bool) error {
  2660  	authInfo := *authInfoP
  2661  	authInfo.HaveMethodAccess = false // Require VM ownership or token.
  2662  	vm, err := m.getVmLockAndAuth(ipAddr, true, &authInfo, accessToken)
  2663  	if err != nil {
  2664  		return nil
  2665  	}
  2666  	defer vm.mutex.Unlock()
  2667  	if enable {
  2668  		if vm.Uncommitted {
  2669  			return errors.New("VM is uncommitted")
  2670  		}
  2671  		if vm.State != proto.StateStopped {
  2672  			return errors.New("VM is not stopped")
  2673  		}
  2674  		// Block reallocation of addresses until VM is destroyed, then release
  2675  		// claims on addresses.
  2676  		vm.Uncommitted = true
  2677  		vm.setState(proto.StateMigrating)
  2678  		if err := m.unregisterAddress(vm.Address, true); err != nil {
  2679  			vm.Uncommitted = false
  2680  			vm.setState(proto.StateStopped)
  2681  			return err
  2682  		}
  2683  		for _, address := range vm.SecondaryAddresses {
  2684  			if err := m.unregisterAddress(address, true); err != nil {
  2685  				vm.logger.Printf("error unregistering address: %s\n",
  2686  					address.IpAddress)
  2687  				vm.Uncommitted = false
  2688  				vm.setState(proto.StateStopped)
  2689  				return err
  2690  			}
  2691  		}
  2692  	} else {
  2693  		if vm.State != proto.StateMigrating {
  2694  			return errors.New("VM is not migrating")
  2695  		}
  2696  		// Reclaim addresses and then allow reallocation if VM is later
  2697  		// destroyed.
  2698  		if err := m.registerAddress(vm.Address); err != nil {
  2699  			vm.setState(proto.StateStopped)
  2700  			return err
  2701  		}
  2702  		for _, address := range vm.SecondaryAddresses {
  2703  			if err := m.registerAddress(address); err != nil {
  2704  				vm.logger.Printf("error registering address: %s\n",
  2705  					address.IpAddress)
  2706  				vm.setState(proto.StateStopped)
  2707  				return err
  2708  			}
  2709  		}
  2710  		vm.Uncommitted = false
  2711  		vm.setState(proto.StateStopped)
  2712  	}
  2713  	return nil
  2714  }
  2715  
  2716  // rebootVm returns true if the DHCP check timed out.
  2717  func (m *Manager) rebootVm(ipAddr net.IP, authInfo *srpc.AuthInformation,
  2718  	dhcpTimeout time.Duration) (bool, error) {
  2719  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  2720  	if err != nil {
  2721  		return false, err
  2722  	}
  2723  	doUnlock := true
  2724  	defer func() {
  2725  		if doUnlock {
  2726  			vm.mutex.Unlock()
  2727  		}
  2728  	}()
  2729  	switch vm.State {
  2730  	case proto.StateStarting:
  2731  		return false, errors.New("VM is starting")
  2732  	case proto.StateRunning:
  2733  		vm.commandInput <- "reboot" // Not a QMP command: interpreted locally.
  2734  		vm.mutex.Unlock()
  2735  		doUnlock = false
  2736  		if dhcpTimeout > 0 {
  2737  			ackChan := vm.manager.DhcpServer.MakeAcknowledgmentChannel(
  2738  				vm.Address.IpAddress)
  2739  			timer := time.NewTimer(dhcpTimeout)
  2740  			select {
  2741  			case <-ackChan:
  2742  				timer.Stop()
  2743  			case <-timer.C:
  2744  				return true, nil
  2745  			}
  2746  		}
  2747  		return false, nil
  2748  	case proto.StateStopping:
  2749  		return false, errors.New("VM is stopping")
  2750  	case proto.StateStopped:
  2751  		return false, errors.New("VM is stopped")
  2752  	case proto.StateFailedToStart:
  2753  		return false, errors.New("VM failed to start")
  2754  	case proto.StateExporting:
  2755  		return false, errors.New("VM is exporting")
  2756  	case proto.StateCrashed:
  2757  		return false, errors.New("VM has crashed")
  2758  	case proto.StateDestroying:
  2759  		return false, errors.New("VM is destroying")
  2760  	case proto.StateMigrating:
  2761  		return false, errors.New("VM is migrating")
  2762  	case proto.StateDebugging:
  2763  		return false, errors.New("VM is debugging")
  2764  	default:
  2765  		return false, errors.New("unknown state: " + vm.State.String())
  2766  	}
  2767  }
  2768  
  2769  func (m *Manager) registerVmMetadataNotifier(ipAddr net.IP,
  2770  	authInfo *srpc.AuthInformation, pathChannel chan<- string) error {
  2771  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  2772  	if err != nil {
  2773  		return err
  2774  	}
  2775  	defer vm.mutex.Unlock()
  2776  	vm.metadataChannels[pathChannel] = struct{}{}
  2777  	return nil
  2778  }
  2779  
  2780  func (m *Manager) replaceVmImage(conn *srpc.Conn,
  2781  	authInfo *srpc.AuthInformation) error {
  2782  
  2783  	sendError := func(conn *srpc.Conn, err error) error {
  2784  		return conn.Encode(proto.ReplaceVmImageResponse{Error: err.Error()})
  2785  	}
  2786  
  2787  	sendUpdate := func(conn *srpc.Conn, message string) error {
  2788  		response := proto.ReplaceVmImageResponse{ProgressMessage: message}
  2789  		if err := conn.Encode(response); err != nil {
  2790  			return err
  2791  		}
  2792  		return conn.Flush()
  2793  	}
  2794  
  2795  	var request proto.ReplaceVmImageRequest
  2796  	if err := conn.Decode(&request); err != nil {
  2797  		return err
  2798  	}
  2799  	vm, err := m.getVmLockAndAuth(request.IpAddress, true, authInfo, nil)
  2800  	if err != nil {
  2801  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  2802  			return err
  2803  		}
  2804  		return sendError(conn, err)
  2805  	}
  2806  	vm.blockMutations = true
  2807  	switch vm.State {
  2808  	case proto.StateStopped:
  2809  	case proto.StateRunning:
  2810  		if len(vm.Address.IpAddress) < 1 {
  2811  			err = errors.New("cannot stop VM with externally managed lease")
  2812  		}
  2813  	default:
  2814  		err = errors.New("VM is not running or stopped")
  2815  	}
  2816  	if err != nil {
  2817  		vm.allowMutationsAndUnlock()
  2818  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  2819  			return err
  2820  		}
  2821  		return sendError(conn, err)
  2822  	}
  2823  	restart := vm.State == proto.StateRunning
  2824  	vm.mutex.Unlock()
  2825  	haveLock := false
  2826  	defer func() {
  2827  		if !haveLock {
  2828  			vm.mutex.Lock()
  2829  		}
  2830  		vm.allowMutationsAndUnlock()
  2831  	}()
  2832  	initrdFilename := vm.getInitrdPath()
  2833  	tmpInitrdFilename := initrdFilename + ".new"
  2834  	defer os.Remove(tmpInitrdFilename)
  2835  	kernelFilename := vm.getKernelPath()
  2836  	tmpKernelFilename := kernelFilename + ".new"
  2837  	defer os.Remove(tmpKernelFilename)
  2838  	tmpRootFilename := vm.VolumeLocations[0].Filename + ".new"
  2839  	defer os.Remove(tmpRootFilename)
  2840  	var newSize uint64
  2841  	if request.ImageName != "" {
  2842  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  2843  			return err
  2844  		}
  2845  		if err := sendUpdate(conn, "getting image"); err != nil {
  2846  			return err
  2847  		}
  2848  		client, img, imageName, err := m.getImage(request.ImageName,
  2849  			request.ImageTimeout)
  2850  		if err != nil {
  2851  			return sendError(conn, err)
  2852  		}
  2853  		defer client.Close()
  2854  		request.ImageName = imageName
  2855  		err = sendUpdate(conn, "unpacking image: "+imageName)
  2856  		if err != nil {
  2857  			return err
  2858  		}
  2859  		writeRawOptions := util.WriteRawOptions{
  2860  			ExtraKernelOptions: vm.ExtraKernelOptions,
  2861  			InitialImageName:   imageName,
  2862  			MinimumFreeBytes:   request.MinimumFreeBytes,
  2863  			OverlayFiles:       request.OverlayFiles,
  2864  			RootLabel:          vm.rootLabel(false),
  2865  			RoundupPower:       request.RoundupPower,
  2866  		}
  2867  		err = m.writeRaw(vm.VolumeLocations[0], ".new", client, img.FileSystem,
  2868  			writeRawOptions, request.SkipBootloader)
  2869  		if err != nil {
  2870  			return sendError(conn, err)
  2871  		}
  2872  		if fi, err := os.Stat(tmpRootFilename); err != nil {
  2873  			return sendError(conn, err)
  2874  		} else {
  2875  			newSize = uint64(fi.Size())
  2876  		}
  2877  	} else if request.ImageDataSize > 0 {
  2878  		err := copyData(tmpRootFilename, conn, request.ImageDataSize)
  2879  		if err != nil {
  2880  			return err
  2881  		}
  2882  		newSize = computeSize(request.MinimumFreeBytes, request.RoundupPower,
  2883  			request.ImageDataSize)
  2884  		if err := setVolumeSize(tmpRootFilename, newSize); err != nil {
  2885  			return sendError(conn, err)
  2886  		}
  2887  	} else if request.ImageURL != "" {
  2888  		if err := maybeDrainImage(conn, request.ImageDataSize); err != nil {
  2889  			return err
  2890  		}
  2891  		httpResponse, err := http.Get(request.ImageURL)
  2892  		if err != nil {
  2893  			return sendError(conn, err)
  2894  		}
  2895  		defer httpResponse.Body.Close()
  2896  		if httpResponse.StatusCode != http.StatusOK {
  2897  			return sendError(conn, errors.New(httpResponse.Status))
  2898  		}
  2899  		if httpResponse.ContentLength < 0 {
  2900  			return sendError(conn,
  2901  				errors.New("ContentLength from: "+request.ImageURL))
  2902  		}
  2903  		err = copyData(tmpRootFilename, httpResponse.Body,
  2904  			uint64(httpResponse.ContentLength))
  2905  		if err != nil {
  2906  			return sendError(conn, err)
  2907  		}
  2908  		newSize = computeSize(request.MinimumFreeBytes, request.RoundupPower,
  2909  			uint64(httpResponse.ContentLength))
  2910  		if err := setVolumeSize(tmpRootFilename, newSize); err != nil {
  2911  			return sendError(conn, err)
  2912  		}
  2913  	} else {
  2914  		return sendError(conn, errors.New("no image specified"))
  2915  	}
  2916  	vm.mutex.Lock()
  2917  	haveLock = true
  2918  	switch vm.State {
  2919  	case proto.StateStopped:
  2920  	case proto.StateRunning:
  2921  		if err := sendUpdate(conn, "stopping VM"); err != nil {
  2922  			return err
  2923  		}
  2924  		stoppedNotifier := make(chan struct{}, 1)
  2925  		vm.stoppedNotifier = stoppedNotifier
  2926  		vm.setState(proto.StateStopping)
  2927  		vm.commandInput <- "system_powerdown"
  2928  		time.AfterFunc(time.Second*15, vm.kill)
  2929  		vm.mutex.Unlock()
  2930  		<-stoppedNotifier
  2931  		vm.mutex.Lock()
  2932  		if vm.State != proto.StateStopped {
  2933  			return sendError(conn,
  2934  				errors.New("VM is not stopped after stop attempt"))
  2935  		}
  2936  	default:
  2937  		return sendError(conn, errors.New("VM is not running or stopped"))
  2938  	}
  2939  	rootFilename := vm.VolumeLocations[0].Filename
  2940  	if request.SkipBackup {
  2941  		if err := os.Rename(tmpRootFilename, rootFilename); err != nil {
  2942  			return sendError(conn, err)
  2943  		}
  2944  	} else {
  2945  		oldRootFilename := vm.VolumeLocations[0].Filename + ".old"
  2946  		if err := os.Rename(rootFilename, oldRootFilename); err != nil {
  2947  			return sendError(conn, err)
  2948  		}
  2949  		if err := os.Rename(tmpRootFilename, rootFilename); err != nil {
  2950  			os.Rename(oldRootFilename, rootFilename)
  2951  			return sendError(conn, err)
  2952  		}
  2953  		os.Rename(initrdFilename, initrdFilename+".old")
  2954  		os.Rename(kernelFilename, kernelFilename+".old")
  2955  	}
  2956  	os.Rename(tmpInitrdFilename, initrdFilename)
  2957  	os.Rename(tmpKernelFilename, kernelFilename)
  2958  	if request.ImageName != "" {
  2959  		vm.ImageName = request.ImageName
  2960  	}
  2961  	vm.Volumes[0].Size = newSize
  2962  	vm.writeAndSendInfo()
  2963  	if restart && vm.State == proto.StateStopped {
  2964  		vm.setState(proto.StateStarting)
  2965  		sendUpdate(conn, "starting VM")
  2966  		vm.mutex.Unlock()
  2967  		_, err := vm.startManaging(0, false, false)
  2968  		vm.mutex.Lock()
  2969  		if err != nil {
  2970  			sendError(conn, err)
  2971  		}
  2972  	}
  2973  	response := proto.ReplaceVmImageResponse{
  2974  		Final: true,
  2975  	}
  2976  	if err := conn.Encode(response); err != nil {
  2977  		return err
  2978  	}
  2979  	return nil
  2980  }
  2981  
  2982  func (m *Manager) replaceVmUserData(ipAddr net.IP, reader io.Reader,
  2983  	size uint64, authInfo *srpc.AuthInformation) error {
  2984  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  2985  	if err != nil {
  2986  		return err
  2987  	}
  2988  	defer vm.mutex.Unlock()
  2989  	filename := filepath.Join(vm.dirname, UserDataFile)
  2990  	oldFilename := filename + ".old"
  2991  	newFilename := filename + ".new"
  2992  	err = fsutil.CopyToFile(newFilename, fsutil.PrivateFilePerms, reader, size)
  2993  	if err != nil {
  2994  		return err
  2995  	}
  2996  	defer os.Remove(newFilename)
  2997  	if err := os.Rename(filename, oldFilename); err != nil {
  2998  		if !os.IsNotExist(err) {
  2999  			return err
  3000  		}
  3001  	}
  3002  	if err := os.Rename(newFilename, filename); err != nil {
  3003  		os.Rename(oldFilename, filename)
  3004  		return err
  3005  	}
  3006  	return nil
  3007  }
  3008  
  3009  func (m *Manager) restoreVmFromSnapshot(ipAddr net.IP,
  3010  	authInfo *srpc.AuthInformation, forceIfNotStopped bool) error {
  3011  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  3012  	if err != nil {
  3013  		return err
  3014  	}
  3015  	defer vm.mutex.Unlock()
  3016  	if vm.State != proto.StateStopped {
  3017  		if !forceIfNotStopped {
  3018  			return errors.New("VM is not stopped")
  3019  		}
  3020  	}
  3021  	for _, volume := range vm.VolumeLocations {
  3022  		snapshotFilename := volume.Filename + ".snapshot"
  3023  		if err := os.Rename(snapshotFilename, volume.Filename); err != nil {
  3024  			if !os.IsNotExist(err) {
  3025  				return err
  3026  			}
  3027  		}
  3028  	}
  3029  	return nil
  3030  }
  3031  
  3032  func (m *Manager) restoreVmImage(ipAddr net.IP,
  3033  	authInfo *srpc.AuthInformation) error {
  3034  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  3035  	if err != nil {
  3036  		return err
  3037  	}
  3038  	defer vm.mutex.Unlock()
  3039  	if vm.State != proto.StateStopped {
  3040  		return errors.New("VM is not stopped")
  3041  	}
  3042  	rootFilename := vm.VolumeLocations[0].Filename
  3043  	oldRootFilename := vm.VolumeLocations[0].Filename + ".old"
  3044  	fi, err := os.Stat(oldRootFilename)
  3045  	if err != nil {
  3046  		return err
  3047  	}
  3048  	if err := os.Rename(oldRootFilename, rootFilename); err != nil {
  3049  		return err
  3050  	}
  3051  	initrdFilename := vm.getInitrdPath()
  3052  	os.Rename(initrdFilename+".old", initrdFilename)
  3053  	kernelFilename := vm.getKernelPath()
  3054  	os.Rename(kernelFilename+".old", kernelFilename)
  3055  	vm.Volumes[0].Size = uint64(fi.Size())
  3056  	vm.writeAndSendInfo()
  3057  	return nil
  3058  }
  3059  
  3060  func (m *Manager) restoreVmUserData(ipAddr net.IP,
  3061  	authInfo *srpc.AuthInformation) error {
  3062  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  3063  	if err != nil {
  3064  		return err
  3065  	}
  3066  	defer vm.mutex.Unlock()
  3067  	filename := filepath.Join(vm.dirname, UserDataFile)
  3068  	oldFilename := filename + ".old"
  3069  	return os.Rename(oldFilename, filename)
  3070  }
  3071  
  3072  func (m *Manager) reorderVmVolumes(ipAddr net.IP,
  3073  	authInfo *srpc.AuthInformation, accessToken []byte,
  3074  	_volumeIndices []uint) error {
  3075  	// If root volume isn't listed, insert default "keep in place" entry.
  3076  	var volumeIndices []uint
  3077  	for _, oldIndex := range _volumeIndices {
  3078  		if oldIndex == 0 {
  3079  			volumeIndices = _volumeIndices
  3080  			break
  3081  		}
  3082  	}
  3083  	if volumeIndices == nil {
  3084  		volumeIndices = make([]uint, 1) // Map 0->0.
  3085  		volumeIndices = append(volumeIndices, _volumeIndices...)
  3086  	}
  3087  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, accessToken)
  3088  	if err != nil {
  3089  		return err
  3090  	}
  3091  	defer vm.mutex.Unlock()
  3092  	if volumeIndices[0] != 0 {
  3093  		if vm.getActiveInitrdPath() != "" {
  3094  			return errors.New("cannot reorder root volume with separate initrd")
  3095  		}
  3096  		if vm.getActiveKernelPath() != "" {
  3097  			return errors.New("cannot reorder root volume with separate kernel")
  3098  		}
  3099  	}
  3100  	if len(volumeIndices) != len(vm.VolumeLocations) {
  3101  		return fmt.Errorf(
  3102  			"number of volume indices: %d != number of volumes: %d",
  3103  			len(volumeIndices), len(vm.VolumeLocations))
  3104  	}
  3105  	if vm.State != proto.StateStopped {
  3106  		return errors.New("VM is not stopped")
  3107  	}
  3108  	var pathsToRename []string
  3109  	defer func() {
  3110  		for _, path := range pathsToRename {
  3111  			os.Remove(path + "~")
  3112  		}
  3113  	}()
  3114  	indexMap := make(map[uint]struct{}, len(volumeIndices))
  3115  	volumeLocations := make([]proto.LocalVolume, len(volumeIndices))
  3116  	volumes := make([]proto.Volume, len(volumeIndices))
  3117  	for newIndex, oldIndex := range volumeIndices {
  3118  		if oldIndex >= uint(len(vm.VolumeLocations)) {
  3119  			return fmt.Errorf("volume index: %d too large", oldIndex)
  3120  		}
  3121  		if _, ok := indexMap[oldIndex]; ok {
  3122  			return fmt.Errorf("duplicate volume index: %d", oldIndex)
  3123  		}
  3124  		indexMap[oldIndex] = struct{}{}
  3125  		vl := vm.VolumeLocations[oldIndex]
  3126  		if newIndex != int(oldIndex) {
  3127  			newName := filepath.Join(vl.DirectoryToCleanup,
  3128  				indexToName(newIndex))
  3129  			if err := os.Link(vl.Filename, newName+"~"); err != nil {
  3130  				return err
  3131  			}
  3132  			pathsToRename = append(pathsToRename, newName)
  3133  			vl.Filename = newName
  3134  		}
  3135  		volumeLocations[newIndex] = vl
  3136  		volumes[newIndex] = vm.Volumes[oldIndex]
  3137  	}
  3138  	for _, path := range pathsToRename {
  3139  		os.Rename(path+"~", path)
  3140  	}
  3141  	pathsToRename = nil
  3142  	vm.VolumeLocations = volumeLocations
  3143  	vm.Volumes = volumes
  3144  	vm.writeAndSendInfo()
  3145  	return nil
  3146  }
  3147  
  3148  func (m *Manager) scanVmRoot(ipAddr net.IP, authInfo *srpc.AuthInformation,
  3149  	scanFilter *filter.Filter) (*filesystem.FileSystem, error) {
  3150  	vm, err := m.getVmLockAndAuth(ipAddr, false, authInfo, nil)
  3151  	if err != nil {
  3152  		return nil, err
  3153  	}
  3154  	defer vm.mutex.RUnlock()
  3155  	return vm.scanVmRoot(scanFilter)
  3156  }
  3157  
  3158  func (vm *vmInfoType) scanVmRoot(scanFilter *filter.Filter) (
  3159  	*filesystem.FileSystem, error) {
  3160  	if vm.State != proto.StateStopped {
  3161  		return nil, errors.New("VM is not stopped")
  3162  	}
  3163  	rootDir, err := ioutil.TempDir(vm.dirname, "root")
  3164  	if err != nil {
  3165  		return nil, err
  3166  	}
  3167  	defer os.Remove(rootDir)
  3168  	partition := "p1"
  3169  	loopDevice, err := fsutil.LoopbackSetupAndWaitForPartition(
  3170  		vm.VolumeLocations[0].Filename, partition, time.Minute, vm.logger)
  3171  	if err != nil {
  3172  		return nil, err
  3173  	}
  3174  	defer fsutil.LoopbackDeleteAndWaitForPartition(loopDevice, partition,
  3175  		time.Minute, vm.logger)
  3176  	blockDevice := loopDevice + partition
  3177  	vm.logger.Debugf(0, "mounting: %s onto: %s\n", blockDevice, rootDir)
  3178  	err = wsyscall.Mount(blockDevice, rootDir, "ext4", 0, "")
  3179  	if err != nil {
  3180  		return nil, fmt.Errorf("error mounting: %s: %s", blockDevice, err)
  3181  	}
  3182  	defer syscall.Unmount(rootDir, 0)
  3183  	sfs, err := scanner.ScanFileSystem(rootDir, nil, scanFilter, nil, nil, nil)
  3184  	if err != nil {
  3185  		return nil, err
  3186  	}
  3187  	return &sfs.FileSystem, nil
  3188  }
  3189  
  3190  func (m *Manager) sendVmInfo(ipAddress string, vm *proto.VmInfo) {
  3191  	if ipAddress != "0.0.0.0" {
  3192  		if vm == nil { // GOB cannot encode a nil value in a map.
  3193  			vm = new(proto.VmInfo)
  3194  		}
  3195  		m.sendUpdate(proto.Update{
  3196  			HaveVMs: true,
  3197  			VMs:     map[string]*proto.VmInfo{ipAddress: vm},
  3198  		})
  3199  	}
  3200  }
  3201  
  3202  func (m *Manager) snapshotVm(ipAddr net.IP, authInfo *srpc.AuthInformation,
  3203  	forceIfNotStopped, snapshotRootOnly bool) error {
  3204  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, nil)
  3205  	if err != nil {
  3206  		return err
  3207  	}
  3208  	defer vm.mutex.Unlock()
  3209  	// TODO(rgooch): First check for sufficient free space.
  3210  	if vm.State != proto.StateStopped {
  3211  		if !forceIfNotStopped {
  3212  			return errors.New("VM is not stopped")
  3213  		}
  3214  	}
  3215  	if err := vm.discardSnapshot(); err != nil {
  3216  		return err
  3217  	}
  3218  	doCleanup := true
  3219  	defer func() {
  3220  		if doCleanup {
  3221  			vm.discardSnapshot()
  3222  		}
  3223  	}()
  3224  	for index, volume := range vm.VolumeLocations {
  3225  		snapshotFilename := volume.Filename + ".snapshot"
  3226  		if index == 0 || !snapshotRootOnly {
  3227  			err := fsutil.CopyFile(snapshotFilename, volume.Filename,
  3228  				fsutil.PrivateFilePerms)
  3229  			if err != nil {
  3230  				return err
  3231  			}
  3232  		}
  3233  	}
  3234  	doCleanup = false
  3235  	return nil
  3236  }
  3237  
  3238  // startVm returns true if the DHCP check timed out.
  3239  func (m *Manager) startVm(ipAddr net.IP, authInfo *srpc.AuthInformation,
  3240  	accessToken []byte, dhcpTimeout time.Duration) (bool, error) {
  3241  	if m.disabled {
  3242  		return false, errors.New("Hypervisor is disabled")
  3243  	}
  3244  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, accessToken)
  3245  	if err != nil {
  3246  		return false, err
  3247  	}
  3248  	doUnlock := true
  3249  	defer func() {
  3250  		if doUnlock {
  3251  			vm.mutex.Unlock()
  3252  		}
  3253  	}()
  3254  	if err := checkAvailableMemory(vm.MemoryInMiB); err != nil {
  3255  		return false, err
  3256  	}
  3257  	switch vm.State {
  3258  	case proto.StateStarting:
  3259  		return false, errors.New("VM is already starting")
  3260  	case proto.StateRunning:
  3261  		return false, errors.New("VM is running")
  3262  	case proto.StateStopping:
  3263  		return false, errors.New("VM is stopping")
  3264  	case proto.StateStopped, proto.StateFailedToStart, proto.StateExporting,
  3265  		proto.StateCrashed:
  3266  		vm.setState(proto.StateStarting)
  3267  		vm.mutex.Unlock()
  3268  		doUnlock = false
  3269  		return vm.startManaging(dhcpTimeout, false, false)
  3270  	case proto.StateDestroying:
  3271  		return false, errors.New("VM is destroying")
  3272  	case proto.StateMigrating:
  3273  		return false, errors.New("VM is migrating")
  3274  	case proto.StateDebugging:
  3275  		debugRoot := vm.getDebugRoot()
  3276  		if debugRoot == "" {
  3277  			return false, errors.New("debugging volume missing")
  3278  		}
  3279  		stoppedNotifier := make(chan struct{}, 1)
  3280  		vm.stoppedNotifier = stoppedNotifier
  3281  		vm.setState(proto.StateStopping)
  3282  		vm.commandInput <- "system_powerdown"
  3283  		time.AfterFunc(time.Second*15, vm.kill)
  3284  		vm.mutex.Unlock()
  3285  		<-stoppedNotifier
  3286  		vm.mutex.Lock()
  3287  		if vm.State != proto.StateStopped {
  3288  			return false, errors.New("VM is not stopped after stop attempt")
  3289  		}
  3290  		if err := os.Remove(debugRoot); err != nil {
  3291  			return false, err
  3292  		}
  3293  		vm.writeAndSendInfo()
  3294  		vm.setState(proto.StateStarting)
  3295  		vm.mutex.Unlock()
  3296  		doUnlock = false
  3297  		return vm.startManaging(dhcpTimeout, false, false)
  3298  	default:
  3299  		return false, errors.New("unknown state: " + vm.State.String())
  3300  	}
  3301  }
  3302  
  3303  func (m *Manager) stopVm(ipAddr net.IP, authInfo *srpc.AuthInformation,
  3304  	accessToken []byte) error {
  3305  	vm, err := m.getVmLockAndAuth(ipAddr, true, authInfo, accessToken)
  3306  	if err != nil {
  3307  		return err
  3308  	}
  3309  	doUnlock := true
  3310  	defer func() {
  3311  		if doUnlock {
  3312  			vm.mutex.Unlock()
  3313  		}
  3314  	}()
  3315  	switch vm.State {
  3316  	case proto.StateStarting:
  3317  		return errors.New("VM is starting")
  3318  	case proto.StateRunning, proto.StateDebugging:
  3319  		if len(vm.Address.IpAddress) < 1 {
  3320  			return errors.New("cannot stop VM with externally managed lease")
  3321  		}
  3322  		if debugRoot := vm.getDebugRoot(); debugRoot != "" {
  3323  			if err := os.Remove(debugRoot); err != nil {
  3324  				return err
  3325  			}
  3326  		}
  3327  		stoppedNotifier := make(chan struct{}, 1)
  3328  		vm.stoppedNotifier = stoppedNotifier
  3329  		vm.setState(proto.StateStopping)
  3330  		vm.commandInput <- "system_powerdown"
  3331  		time.AfterFunc(time.Second*15, vm.kill)
  3332  		vm.mutex.Unlock()
  3333  		doUnlock = false
  3334  		<-stoppedNotifier
  3335  	case proto.StateFailedToStart:
  3336  		vm.setState(proto.StateStopped)
  3337  	case proto.StateStopping:
  3338  		return errors.New("VM is stopping")
  3339  	case proto.StateStopped:
  3340  		return errors.New("VM is already stopped")
  3341  	case proto.StateDestroying:
  3342  		return errors.New("VM is destroying")
  3343  	case proto.StateMigrating:
  3344  		return errors.New("VM is migrating")
  3345  	case proto.StateExporting:
  3346  		return errors.New("VM is exporting")
  3347  	case proto.StateCrashed:
  3348  		vm.setState(proto.StateStopped)
  3349  	default:
  3350  		return errors.New("unknown state: " + vm.State.String())
  3351  	}
  3352  	return nil
  3353  }
  3354  
  3355  func (m *Manager) unregisterVmMetadataNotifier(ipAddr net.IP,
  3356  	pathChannel chan<- string) error {
  3357  	vm, err := m.getVmAndLock(ipAddr, true)
  3358  	if err != nil {
  3359  		return err
  3360  	}
  3361  	defer vm.mutex.Unlock()
  3362  	delete(vm.metadataChannels, pathChannel)
  3363  	return nil
  3364  }
  3365  
  3366  func (m *Manager) writeRaw(volume proto.LocalVolume, extension string,
  3367  	client *srpc.Client, fs *filesystem.FileSystem,
  3368  	writeRawOptions util.WriteRawOptions, skipBootloader bool) error {
  3369  	startTime := time.Now()
  3370  	var objectsGetter objectserver.ObjectsGetter
  3371  	if m.objectCache == nil {
  3372  		objectClient := objclient.AttachObjectClient(client)
  3373  		defer objectClient.Close()
  3374  		objectsGetter = objectClient
  3375  	} else {
  3376  		objectsGetter = m.objectCache
  3377  	}
  3378  	writeRawOptions.AllocateBlocks = true
  3379  	if skipBootloader {
  3380  		bootInfo, err := util.GetBootInfo(fs, writeRawOptions.RootLabel, "")
  3381  		if err != nil {
  3382  			return err
  3383  		}
  3384  		err = extractKernel(volume, extension, objectsGetter, fs, bootInfo)
  3385  		if err != nil {
  3386  			return err
  3387  		}
  3388  	} else {
  3389  		writeRawOptions.InstallBootloader = true
  3390  	}
  3391  	writeRawOptions.WriteFstab = true
  3392  	err := util.WriteRawWithOptions(fs, objectsGetter,
  3393  		volume.Filename+extension, fsutil.PrivateFilePerms,
  3394  		mbr.TABLE_TYPE_MSDOS, writeRawOptions, m.Logger)
  3395  	if err != nil {
  3396  		return err
  3397  	}
  3398  	m.Logger.Debugf(1, "Wrote root volume in %s\n",
  3399  		format.Duration(time.Since(startTime)))
  3400  	return nil
  3401  }
  3402  
  3403  func (vm *vmInfoType) autoDestroy() {
  3404  	vm.logger.Println("VM was not acknowledged, destroying")
  3405  	authInfo := &srpc.AuthInformation{HaveMethodAccess: true}
  3406  	err := vm.manager.destroyVm(vm.Address.IpAddress, authInfo, nil)
  3407  	if err != nil {
  3408  		vm.logger.Println(err)
  3409  	}
  3410  }
  3411  
  3412  func (vm *vmInfoType) changeIpAddress(ipAddress string) error {
  3413  	dirname := filepath.Join(vm.manager.StateDir, "VMs", ipAddress)
  3414  	if err := os.Rename(vm.dirname, dirname); err != nil {
  3415  		return err
  3416  	}
  3417  	vm.dirname = dirname
  3418  	for index, volume := range vm.VolumeLocations {
  3419  		parent := filepath.Dir(volume.DirectoryToCleanup)
  3420  		dirname := filepath.Join(parent, ipAddress)
  3421  		if err := os.Rename(volume.DirectoryToCleanup, dirname); err != nil {
  3422  			return err
  3423  		}
  3424  		vm.VolumeLocations[index] = proto.LocalVolume{
  3425  			DirectoryToCleanup: dirname,
  3426  			Filename: filepath.Join(dirname,
  3427  				filepath.Base(volume.Filename)),
  3428  		}
  3429  	}
  3430  	vm.logger.Printf("changing to new address: %s\n", ipAddress)
  3431  	vm.logger = prefixlogger.New(ipAddress+": ", vm.manager.Logger)
  3432  	vm.writeInfo()
  3433  	vm.manager.mutex.Lock()
  3434  	defer vm.manager.mutex.Unlock()
  3435  	delete(vm.manager.vms, vm.ipAddress)
  3436  	vm.ipAddress = ipAddress
  3437  	vm.manager.vms[vm.ipAddress] = vm
  3438  	vm.manager.sendUpdate(proto.Update{
  3439  		HaveVMs: true,
  3440  		VMs:     map[string]*proto.VmInfo{ipAddress: &vm.VmInfo},
  3441  	})
  3442  	return nil
  3443  }
  3444  
  3445  func (vm *vmInfoType) checkAuth(authInfo *srpc.AuthInformation,
  3446  	accessToken []byte) error {
  3447  	if authInfo.HaveMethodAccess {
  3448  		return nil
  3449  	}
  3450  	if _, ok := vm.ownerUsers[authInfo.Username]; ok {
  3451  		return nil
  3452  	}
  3453  	if len(vm.accessToken) >= 32 && bytes.Equal(vm.accessToken, accessToken) {
  3454  		return nil
  3455  	}
  3456  	for _, ownerGroup := range vm.OwnerGroups {
  3457  		if _, ok := authInfo.GroupList[ownerGroup]; ok {
  3458  			return nil
  3459  		}
  3460  	}
  3461  	return errorNoAccessToResource
  3462  }
  3463  
  3464  func (vm *vmInfoType) cleanup() {
  3465  	if vm == nil {
  3466  		return
  3467  	}
  3468  	select {
  3469  	case vm.commandInput <- "quit":
  3470  	default:
  3471  	}
  3472  	m := vm.manager
  3473  	m.mutex.Lock()
  3474  	delete(m.vms, vm.ipAddress)
  3475  	if !vm.doNotWriteOrSend {
  3476  		m.sendVmInfo(vm.ipAddress, nil)
  3477  	}
  3478  	if !vm.Uncommitted {
  3479  		if err := m.releaseAddressInPoolWithLock(vm.Address); err != nil {
  3480  			m.Logger.Println(err)
  3481  		}
  3482  		for _, address := range vm.SecondaryAddresses {
  3483  			if err := m.releaseAddressInPoolWithLock(address); err != nil {
  3484  				m.Logger.Println(err)
  3485  			}
  3486  		}
  3487  	}
  3488  	os.RemoveAll(vm.dirname)
  3489  	for _, volume := range vm.VolumeLocations {
  3490  		os.RemoveAll(volume.DirectoryToCleanup)
  3491  	}
  3492  	m.mutex.Unlock()
  3493  }
  3494  
  3495  func (vm *vmInfoType) copyRootVolume(request proto.CreateVmRequest,
  3496  	reader io.Reader, dataSize uint64, volumeType proto.VolumeType) error {
  3497  	err := vm.setupVolumes(dataSize, volumeType, request.SecondaryVolumes,
  3498  		request.SpreadVolumes)
  3499  	if err != nil {
  3500  		return err
  3501  	}
  3502  	err = copyData(vm.VolumeLocations[0].Filename, reader, dataSize)
  3503  	if err != nil {
  3504  		return err
  3505  	}
  3506  	vm.Volumes = []proto.Volume{{Size: dataSize}}
  3507  	return nil
  3508  }
  3509  
  3510  // delete deletes external VM state (files, leases, IPs). The VM lock will be
  3511  // released and later grabbed. The Manager lock will be grabbed and released
  3512  // while the VM lock is not held.
  3513  func (vm *vmInfoType) delete() {
  3514  	vm.logger.Debugln(2, "delete(): starting")
  3515  	vm.State = proto.StateDestroying
  3516  	select {
  3517  	case vm.accessTokenCleanupNotifier <- struct{}{}:
  3518  	default:
  3519  	}
  3520  	for ch := range vm.metadataChannels {
  3521  		close(ch)
  3522  	}
  3523  	vm.mutex.Unlock()
  3524  	for _, volume := range vm.VolumeLocations {
  3525  		os.Remove(volume.Filename)
  3526  		if volume.DirectoryToCleanup != "" {
  3527  			os.RemoveAll(volume.DirectoryToCleanup)
  3528  		}
  3529  	}
  3530  	os.RemoveAll(vm.dirname)
  3531  	vm.manager.DhcpServer.RemoveLease(vm.Address.IpAddress)
  3532  	for _, address := range vm.SecondaryAddresses {
  3533  		vm.manager.DhcpServer.RemoveLease(address.IpAddress)
  3534  	}
  3535  	vm.manager.mutex.Lock()
  3536  	delete(vm.manager.vms, vm.ipAddress)
  3537  	var err error
  3538  	if vm.State == proto.StateExporting {
  3539  		err = vm.manager.unregisterAddress(vm.Address, false)
  3540  		for _, address := range vm.SecondaryAddresses {
  3541  			err := vm.manager.unregisterAddress(address, false)
  3542  			if err != nil {
  3543  				vm.manager.Logger.Println(err)
  3544  			}
  3545  		}
  3546  	} else if !vm.Uncommitted {
  3547  		err = vm.manager.releaseAddressInPoolWithLock(vm.Address)
  3548  		for _, address := range vm.SecondaryAddresses {
  3549  			err := vm.manager.releaseAddressInPoolWithLock(address)
  3550  			if err != nil {
  3551  				vm.manager.Logger.Println(err)
  3552  			}
  3553  		}
  3554  	}
  3555  	vm.manager.mutex.Unlock()
  3556  	if err != nil {
  3557  		vm.manager.Logger.Println(err)
  3558  	}
  3559  	vm.manager.sendVmInfo(vm.ipAddress, nil) // Send now that VM is really gone.
  3560  	if vm.lockWatcher != nil {
  3561  		vm.lockWatcher.Stop()
  3562  	}
  3563  	vm.mutex.Lock()
  3564  	vm.logger.Debugln(2, "delete(): returning")
  3565  }
  3566  
  3567  func (vm *vmInfoType) destroy() {
  3568  	vm.mutex.Lock()
  3569  	defer vm.mutex.Unlock()
  3570  	select {
  3571  	case vm.commandInput <- "quit":
  3572  	default:
  3573  	}
  3574  	vm.delete()
  3575  }
  3576  
  3577  func (vm *vmInfoType) discardSnapshot() error {
  3578  	for _, volume := range vm.VolumeLocations {
  3579  		if err := removeFile(volume.Filename + ".snapshot"); err != nil {
  3580  			return err
  3581  		}
  3582  	}
  3583  	return nil
  3584  }
  3585  
  3586  func (vm *vmInfoType) getActiveInitrdPath() string {
  3587  	initrdPath := vm.getInitrdPath()
  3588  	if _, err := os.Stat(initrdPath); err == nil {
  3589  		return initrdPath
  3590  	}
  3591  	return ""
  3592  }
  3593  
  3594  func (vm *vmInfoType) getActiveKernelPath() string {
  3595  	kernelPath := vm.getKernelPath()
  3596  	if _, err := os.Stat(kernelPath); err == nil {
  3597  		return kernelPath
  3598  	}
  3599  	return ""
  3600  }
  3601  
  3602  func (vm *vmInfoType) getDebugRoot() string {
  3603  	filename := vm.VolumeLocations[0].Filename + ".debug"
  3604  	if _, err := os.Stat(filename); err == nil {
  3605  		return filename
  3606  	}
  3607  	return ""
  3608  }
  3609  
  3610  func (vm *vmInfoType) getInitrdPath() string {
  3611  	return filepath.Join(vm.VolumeLocations[0].DirectoryToCleanup, "initrd")
  3612  }
  3613  
  3614  func (vm *vmInfoType) getKernelPath() string {
  3615  	return filepath.Join(vm.VolumeLocations[0].DirectoryToCleanup, "kernel")
  3616  }
  3617  
  3618  func (vm *vmInfoType) kill() {
  3619  	vm.mutex.RLock()
  3620  	defer vm.mutex.RUnlock()
  3621  	if vm.State == proto.StateStopping {
  3622  		vm.commandInput <- "quit"
  3623  	}
  3624  }
  3625  
  3626  func (vm *vmInfoType) monitor(monitorSock net.Conn,
  3627  	commandInput <-chan string, commandOutput chan<- byte) {
  3628  	vm.hasHealthAgent = false
  3629  	defer monitorSock.Close()
  3630  	go vm.processMonitorResponses(monitorSock, commandOutput)
  3631  	cancelChannel := make(chan struct{}, 1)
  3632  	go vm.probeHealthAgent(cancelChannel)
  3633  	go vm.serialManager()
  3634  	for command := range commandInput {
  3635  		var err error
  3636  		if command == "reboot" { // Not a QMP command: convert to ctrl-alt-del.
  3637  			_, err = monitorSock.Write([]byte(rebootJson))
  3638  		} else if command[0] == '\\' {
  3639  			_, err = fmt.Fprintln(monitorSock, command[1:])
  3640  		} else {
  3641  			_, err = fmt.Fprintf(monitorSock, "{\"execute\":\"%s\"}\n",
  3642  				command)
  3643  		}
  3644  		if err != nil {
  3645  			vm.logger.Println(err)
  3646  		} else if command[0] == '\\' {
  3647  			vm.logger.Debugf(0, "sent JSON: %s", command[1:])
  3648  		} else {
  3649  			vm.logger.Debugf(0, "sent %s command", command)
  3650  		}
  3651  	}
  3652  	cancelChannel <- struct{}{}
  3653  }
  3654  
  3655  func (vm *vmInfoType) probeHealthAgent(cancel <-chan struct{}) {
  3656  	stopTime := time.Now().Add(time.Minute * 5)
  3657  	for time.Until(stopTime) > 0 {
  3658  		select {
  3659  		case <-cancel:
  3660  			return
  3661  		default:
  3662  		}
  3663  		sleepUntil := time.Now().Add(time.Second)
  3664  		if vm.ipAddress == "0.0.0.0" {
  3665  			time.Sleep(time.Until(sleepUntil))
  3666  			continue
  3667  		}
  3668  		conn, err := net.DialTimeout("tcp", vm.ipAddress+":6910", time.Second*5)
  3669  		if err == nil {
  3670  			conn.Close()
  3671  			vm.mutex.Lock()
  3672  			vm.hasHealthAgent = true
  3673  			vm.mutex.Unlock()
  3674  			return
  3675  		}
  3676  		time.Sleep(time.Until(sleepUntil))
  3677  	}
  3678  }
  3679  
  3680  func (vm *vmInfoType) rootLabel(debug bool) string {
  3681  	ipAddr := vm.Address.IpAddress
  3682  	var prefix string
  3683  	if debug {
  3684  		prefix = "debugfs" // 16 characters: the limit.
  3685  	} else {
  3686  		prefix = "rootfs"
  3687  	}
  3688  	return fmt.Sprintf("%s@%02x%02x%02x%02x",
  3689  		prefix, ipAddr[0], ipAddr[1], ipAddr[2], ipAddr[3])
  3690  }
  3691  
  3692  func (vm *vmInfoType) serialManager() {
  3693  	bootlogFile, err := os.OpenFile(filepath.Join(vm.dirname, bootlogFilename),
  3694  		os.O_CREATE|os.O_WRONLY|os.O_APPEND, fsutil.PublicFilePerms)
  3695  	if err != nil {
  3696  		vm.logger.Printf("error opening bootlog file: %s\n", err)
  3697  		return
  3698  	}
  3699  	defer bootlogFile.Close()
  3700  	serialSock, err := net.Dial("unix",
  3701  		filepath.Join(vm.dirname, serialSockFilename))
  3702  	if err != nil {
  3703  		vm.logger.Printf("error connecting to console: %s\n", err)
  3704  		return
  3705  	}
  3706  	defer serialSock.Close()
  3707  	vm.mutex.Lock()
  3708  	vm.serialInput = serialSock
  3709  	vm.mutex.Unlock()
  3710  	buffer := make([]byte, 256)
  3711  	for {
  3712  		if nRead, err := serialSock.Read(buffer); err != nil {
  3713  			if err != io.EOF {
  3714  				vm.logger.Printf("error reading from serial port: %s\n", err)
  3715  			} else {
  3716  				vm.logger.Debugln(0, "serial port closed")
  3717  			}
  3718  			break
  3719  		} else if nRead > 0 {
  3720  			vm.mutex.RLock()
  3721  			if vm.serialOutput != nil {
  3722  				for _, char := range buffer[:nRead] {
  3723  					vm.serialOutput <- char
  3724  				}
  3725  				vm.mutex.RUnlock()
  3726  			} else {
  3727  				vm.mutex.RUnlock()
  3728  				bootlogFile.Write(buffer[:nRead])
  3729  			}
  3730  		}
  3731  	}
  3732  	vm.mutex.Lock()
  3733  	vm.serialInput = nil
  3734  	if vm.serialOutput != nil {
  3735  		close(vm.serialOutput)
  3736  		vm.serialOutput = nil
  3737  	}
  3738  	vm.mutex.Unlock()
  3739  }
  3740  
  3741  func (vm *vmInfoType) setState(state proto.State) {
  3742  	if state != vm.State {
  3743  		vm.ChangedStateOn = time.Now()
  3744  		vm.State = state
  3745  	}
  3746  	if !vm.doNotWriteOrSend {
  3747  		vm.writeAndSendInfo()
  3748  	}
  3749  }
  3750  
  3751  // This may grab and release the VM lock.
  3752  // If dhcpTimeout <0: no DHCP lease is set up, if 0, do not wait for DHCP ACK,
  3753  // else wait for DHCP ACK.
  3754  // It returns true if there was a timeout waiting for the DHCP request, else
  3755  // false.
  3756  func (vm *vmInfoType) startManaging(dhcpTimeout time.Duration,
  3757  	enableNetboot, haveManagerLock bool) (bool, error) {
  3758  	vm.monitorSockname = filepath.Join(vm.dirname, "monitor.sock")
  3759  	vm.logger.Debugln(1, "startManaging() starting")
  3760  	switch vm.State {
  3761  	case proto.StateStarting:
  3762  	case proto.StateRunning:
  3763  	case proto.StateFailedToStart:
  3764  	case proto.StateStopping:
  3765  		monitorSock, err := net.Dial("unix", vm.monitorSockname)
  3766  		if err == nil {
  3767  			commandInput := make(chan string, 2)
  3768  			commandOutput := make(chan byte, 16<<10)
  3769  			vm.commandInput = commandInput
  3770  			vm.commandOutput = commandOutput
  3771  			go vm.monitor(monitorSock, commandInput, commandOutput)
  3772  			commandInput <- "qmp_capabilities"
  3773  			commandInput <- "quit"
  3774  		} else {
  3775  			vm.setState(proto.StateStopped)
  3776  			vm.logger.Println(err)
  3777  		}
  3778  		return false, nil
  3779  	case proto.StateStopped:
  3780  		return false, nil
  3781  	case proto.StateDestroying:
  3782  		vm.mutex.Lock()
  3783  		vm.delete()
  3784  		vm.mutex.Unlock()
  3785  		return false, nil
  3786  	case proto.StateMigrating:
  3787  		return false, nil
  3788  	case proto.StateCrashed:
  3789  	case proto.StateDebugging:
  3790  	default:
  3791  		vm.logger.Println("unknown state: " + vm.State.String())
  3792  		return false, nil
  3793  	}
  3794  	if err := vm.checkVolumes(true); err != nil {
  3795  		vm.setState(proto.StateFailedToStart)
  3796  		return false, err
  3797  	}
  3798  	if dhcpTimeout >= 0 {
  3799  		err := vm.manager.DhcpServer.AddLease(vm.Address, vm.Hostname)
  3800  		if err != nil {
  3801  			return false, err
  3802  		}
  3803  		for _, address := range vm.SecondaryAddresses {
  3804  			err := vm.manager.DhcpServer.AddLease(address, vm.Hostname)
  3805  			if err != nil {
  3806  				vm.logger.Println(err)
  3807  			}
  3808  		}
  3809  	}
  3810  	monitorSock, err := net.Dial("unix", vm.monitorSockname)
  3811  	if err != nil {
  3812  		vm.logger.Debugf(1, "error connecting to: %s: %s\n",
  3813  			vm.monitorSockname, err)
  3814  		if err := vm.startVm(enableNetboot, haveManagerLock); err != nil {
  3815  			vm.logger.Println(err)
  3816  			vm.setState(proto.StateFailedToStart)
  3817  			return false, err
  3818  		}
  3819  		monitorSock, err = net.Dial("unix", vm.monitorSockname)
  3820  	}
  3821  	if err != nil {
  3822  		vm.logger.Println(err)
  3823  		vm.setState(proto.StateFailedToStart)
  3824  		return false, err
  3825  	}
  3826  	commandInput := make(chan string, 1)
  3827  	vm.commandInput = commandInput
  3828  	commandOutput := make(chan byte, 16<<10)
  3829  	vm.commandOutput = commandOutput
  3830  	go vm.monitor(monitorSock, commandInput, commandOutput)
  3831  	commandInput <- "qmp_capabilities"
  3832  	if vm.getDebugRoot() == "" {
  3833  		vm.setState(proto.StateRunning)
  3834  	} else {
  3835  		vm.setState(proto.StateDebugging)
  3836  	}
  3837  	if len(vm.Address.IpAddress) < 1 {
  3838  		// Must wait to see what IP address is given by external DHCP server.
  3839  		reqCh := vm.manager.DhcpServer.MakeRequestChannel(vm.Address.MacAddress)
  3840  		if dhcpTimeout < time.Minute {
  3841  			dhcpTimeout = time.Minute
  3842  		}
  3843  		timer := time.NewTimer(dhcpTimeout)
  3844  		select {
  3845  		case ipAddr := <-reqCh:
  3846  			timer.Stop()
  3847  			return false, vm.changeIpAddress(ipAddr.String())
  3848  		case <-timer.C:
  3849  			return true, errors.New("timed out on external lease")
  3850  		}
  3851  	}
  3852  	if dhcpTimeout > 0 {
  3853  		ackChan := vm.manager.DhcpServer.MakeAcknowledgmentChannel(
  3854  			vm.Address.IpAddress)
  3855  		timer := time.NewTimer(dhcpTimeout)
  3856  		select {
  3857  		case <-ackChan:
  3858  			timer.Stop()
  3859  		case <-timer.C:
  3860  			return true, nil
  3861  		}
  3862  	}
  3863  	return false, nil
  3864  }
  3865  
  3866  func (vm *vmInfoType) allowMutationsAndUnlock() {
  3867  	if !vm.blockMutations {
  3868  		panic(vm.Address.IpAddress.String() +
  3869  			": blockMutations flag already unset")
  3870  	}
  3871  	vm.blockMutations = false
  3872  	vm.mutex.Unlock()
  3873  }
  3874  
  3875  func (vm *vmInfoType) getBridgesAndOptions(haveManagerLock bool) (
  3876  	[]string, []string, error) {
  3877  	if !haveManagerLock {
  3878  		vm.manager.mutex.RLock()
  3879  		defer vm.manager.mutex.RUnlock()
  3880  	}
  3881  	addresses := make([]proto.Address, 1, len(vm.SecondarySubnetIDs)+1)
  3882  	addresses[0] = vm.Address
  3883  	subnetIDs := make([]string, 1, len(vm.SecondarySubnetIDs)+1)
  3884  	subnetIDs[0] = vm.SubnetId
  3885  	for index, subnetId := range vm.SecondarySubnetIDs {
  3886  		addresses = append(addresses, vm.SecondaryAddresses[index])
  3887  		subnetIDs = append(subnetIDs, subnetId)
  3888  	}
  3889  	var bridges, options []string
  3890  	deviceDriver := "virtio-net-pci"
  3891  	if vm.DisableVirtIO {
  3892  		deviceDriver = "e1000"
  3893  	}
  3894  	for index, address := range addresses {
  3895  		subnet, ok := vm.manager.subnets[subnetIDs[index]]
  3896  		if !ok {
  3897  			return nil, nil,
  3898  				fmt.Errorf("subnet: %s not found", subnetIDs[index])
  3899  		}
  3900  		bridge, vlanOption, err := vm.manager.getBridgeForSubnet(subnet)
  3901  		if err != nil {
  3902  			return nil, nil, err
  3903  		}
  3904  		bridges = append(bridges, bridge)
  3905  		options = append(options,
  3906  			"-netdev", fmt.Sprintf("tap,id=net%d,fd=%d%s",
  3907  				index, index+3, vlanOption),
  3908  			"-device", fmt.Sprintf("%s,netdev=net%d,mac=%s",
  3909  				deviceDriver, index, address.MacAddress))
  3910  	}
  3911  	return bridges, options, nil
  3912  }
  3913  
  3914  func (vm *vmInfoType) setupLockWatcher() {
  3915  	vm.lockWatcher = lockwatcher.New(&vm.mutex,
  3916  		lockwatcher.LockWatcherOptions{
  3917  			CheckInterval: vm.manager.LockCheckInterval,
  3918  			Logger:        vm.logger,
  3919  			LogTimeout:    vm.manager.LockLogTimeout,
  3920  		})
  3921  }
  3922  
  3923  func (vm *vmInfoType) startVm(enableNetboot, haveManagerLock bool) error {
  3924  	if err := checkAvailableMemory(vm.MemoryInMiB); err != nil {
  3925  		return err
  3926  	}
  3927  	nCpus := numSpecifiedVirtualCPUs(vm.MilliCPUs, vm.VirtualCPUs)
  3928  	if nCpus > uint(runtime.NumCPU()) && runtime.NumCPU() > 0 {
  3929  		nCpus = uint(runtime.NumCPU())
  3930  	}
  3931  	bridges, netOptions, err := vm.getBridgesAndOptions(haveManagerLock)
  3932  	if err != nil {
  3933  		return err
  3934  	}
  3935  	var tapFiles []*os.File
  3936  	for _, bridge := range bridges {
  3937  		tapFile, err := createTapDevice(bridge)
  3938  		if err != nil {
  3939  			return fmt.Errorf("error creating tap device: %s", err)
  3940  		}
  3941  		defer tapFile.Close()
  3942  		tapFiles = append(tapFiles, tapFile)
  3943  	}
  3944  	cmd := exec.Command("qemu-system-x86_64", "-machine", "pc,accel=kvm",
  3945  		"-cpu", "host", // Allow the VM to take full advantage of host CPU.
  3946  		"-nodefaults",
  3947  		"-name", vm.ipAddress,
  3948  		"-m", fmt.Sprintf("%dM", vm.MemoryInMiB),
  3949  		"-smp", fmt.Sprintf("cpus=%d", nCpus),
  3950  		"-serial",
  3951  		"unix:"+filepath.Join(vm.dirname, serialSockFilename)+",server,nowait",
  3952  		"-chroot", "/tmp",
  3953  		"-runas", vm.manager.Username,
  3954  		"-qmp", "unix:"+vm.monitorSockname+",server,nowait",
  3955  		"-daemonize")
  3956  	var interfaceDriver string
  3957  	if !vm.DisableVirtIO {
  3958  		interfaceDriver = ",if=virtio"
  3959  	}
  3960  	if debugRoot := vm.getDebugRoot(); debugRoot != "" {
  3961  		options := interfaceDriver + ",discard=off"
  3962  		cmd.Args = append(cmd.Args,
  3963  			"-drive", "file="+debugRoot+",format=raw"+options)
  3964  	} else if kernelPath := vm.getActiveKernelPath(); kernelPath != "" {
  3965  		kernelOptions := []string{"net.ifnames=0"}
  3966  		if vm.ExtraKernelOptions != "" {
  3967  			kernelOptions = append(kernelOptions, vm.ExtraKernelOptions)
  3968  		}
  3969  		kernelOptionsString := strings.Join(kernelOptions, " ")
  3970  		cmd.Args = append(cmd.Args, "-kernel", kernelPath)
  3971  		if initrdPath := vm.getActiveInitrdPath(); initrdPath != "" {
  3972  			cmd.Args = append(cmd.Args,
  3973  				"-initrd", initrdPath,
  3974  				"-append", util.MakeKernelOptions("LABEL="+vm.rootLabel(false),
  3975  					kernelOptionsString),
  3976  			)
  3977  		} else {
  3978  			cmd.Args = append(cmd.Args,
  3979  				"-append", util.MakeKernelOptions("/dev/vda1",
  3980  					kernelOptionsString),
  3981  			)
  3982  		}
  3983  	} else if enableNetboot {
  3984  		cmd.Args = append(cmd.Args, "-boot", "order=n")
  3985  	}
  3986  	cmd.Args = append(cmd.Args, netOptions...)
  3987  	if vm.manager.ShowVgaConsole {
  3988  		cmd.Args = append(cmd.Args, "-vga", "std")
  3989  	} else {
  3990  		switch vm.ConsoleType {
  3991  		case proto.ConsoleNone:
  3992  			cmd.Args = append(cmd.Args, "-nographic")
  3993  		case proto.ConsoleDummy:
  3994  			cmd.Args = append(cmd.Args, "-display", "none", "-vga", "std")
  3995  		case proto.ConsoleVNC:
  3996  			cmd.Args = append(cmd.Args,
  3997  				"-display", "vnc=unix:"+filepath.Join(vm.dirname, "vnc"),
  3998  				"-vga", "std",
  3999  				"-usb", "-device", "usb-tablet",
  4000  			)
  4001  		}
  4002  	}
  4003  	for index, volume := range vm.VolumeLocations {
  4004  		var volumeFormat proto.VolumeFormat
  4005  		if index < len(vm.Volumes) {
  4006  			volumeFormat = vm.Volumes[index].Format
  4007  		}
  4008  		options := interfaceDriver + ",discard=off"
  4009  		cmd.Args = append(cmd.Args,
  4010  			"-drive", "file="+volume.Filename+",format="+volumeFormat.String()+
  4011  				options)
  4012  	}
  4013  	if cid, err := vm.manager.GetVmCID(vm.Address.IpAddress); err != nil {
  4014  		return err
  4015  	} else if cid > 2 {
  4016  		cmd.Args = append(cmd.Args,
  4017  			"-device",
  4018  			fmt.Sprintf("vhost-vsock-pci,id=vhost-vsock-pci0,guest-cid=%d",
  4019  				cid))
  4020  	}
  4021  	os.Remove(filepath.Join(vm.dirname, "bootlog"))
  4022  	cmd.ExtraFiles = tapFiles // Start at fd=3 for QEMU.
  4023  	if output, err := cmd.CombinedOutput(); err != nil {
  4024  		return fmt.Errorf("error starting QEMU: %s: %s", err, output)
  4025  	} else if len(output) > 0 {
  4026  		vm.logger.Printf("QEMU started. Output: \"%s\"\n", string(output))
  4027  	} else {
  4028  		vm.logger.Println("QEMU started.")
  4029  	}
  4030  	return nil
  4031  }
  4032  
  4033  func (vm *vmInfoType) writeAndSendInfo() {
  4034  	if err := vm.writeInfo(); err != nil {
  4035  		vm.logger.Println(err)
  4036  		return
  4037  	}
  4038  	vm.manager.sendVmInfo(vm.ipAddress, &vm.VmInfo)
  4039  }
  4040  
  4041  func (vm *vmInfoType) writeInfo() error {
  4042  	filename := filepath.Join(vm.dirname, "info.json")
  4043  	return json.WriteToFile(filename, fsutil.PublicFilePerms, "    ", vm)
  4044  }