github.com/lazyboychen7/engine@v17.12.1-ce-rc2+incompatible/daemon/graphdriver/lcow/lcow_svm.go

// +build windows

package lcow

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/Microsoft/hcsshim"
	"github.com/Microsoft/opengcs/client"
	"github.com/sirupsen/logrus"
)

// Code for all the service VM management for the LCOW graphdriver

var errVMisTerminating = errors.New("service VM is shutting down")
var errVMUnknown = errors.New("service vm id is unknown")
var errVMStillHasReference = errors.New("attempting to delete a VM that is still being used")

// serviceVMMap is the struct representing the id -> service VM mapping.
type serviceVMMap struct {
	sync.Mutex
	svms map[string]*serviceVMMapItem
}

// serviceVMMapItem is our internal structure representing an item in our
// map of service VMs we are maintaining.
type serviceVMMapItem struct {
	svm      *serviceVM // actual service vm object
	refCount int        // ref count for the VM
}

// serviceVM is our internal structure representing a single service VM and
// the state we track for it.
type serviceVM struct {
	sync.Mutex                     // Serialises operations being performed in this service VM.
	scratchAttached bool           // Has a scratch been attached?
	config          *client.Config // Represents the service VM item.

	// Indicates that the vm is started
	startStatus chan interface{}
	startError  error

	// Indicates that the vm is stopped
	stopStatus chan interface{}
	stopError  error

	attachedVHDs map[string]int // Map ref counting all the VHDs we've hot-added/hot-removed.
	unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
}

// add will add an id to the service vm map. There are three cases:
//	- entry doesn't exist:
//		- add id to map and return a new vm that the caller can manually configure+start
//	- entry does exist:
//		- return vm in map and increment ref count
//	- entry does exist but the ref count is 0:
//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for the stop
func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
	svmMap.Lock()
	defer svmMap.Unlock()
	if svm, ok := svmMap.svms[id]; ok {
		if svm.refCount == 0 {
			return svm.svm, true, errVMisTerminating
		}
		svm.refCount++
		return svm.svm, true, nil
	}

	// Doesn't exist, so create an empty svm to put into the map and return
	newSVM := &serviceVM{
		startStatus:  make(chan interface{}),
		stopStatus:   make(chan interface{}),
		attachedVHDs: make(map[string]int),
		unionMounts:  make(map[string]int),
		config:       &client.Config{},
	}
	svmMap.svms[id] = &serviceVMMapItem{
		svm:      newSVM,
		refCount: 1,
	}
	return newSVM, false, nil
}

// get will get the service vm from the map. There are three cases:
//	- entry doesn't exist:
//		- return errVMUnknown
//	- entry does exist:
//		- return vm with no error
//	- entry does exist but the ref count is 0:
//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for the stop
func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
	svmMap.Lock()
	defer svmMap.Unlock()
	svm, ok := svmMap.svms[id]
	if !ok {
		return nil, errVMUnknown
	}
	if svm.refCount == 0 {
		return svm.svm, errVMisTerminating
	}
	return svm.svm, nil
}
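// The helper below is a hypothetical usage sketch, not part of the original
// driver: it shows the intended acquire path against the map above. add
// either creates a fresh serviceVM (which the caller must then configure,
// start, and report on via signalStartFinished) or bumps the ref count of an
// existing one; a VM that is mid-shutdown surfaces as errVMisTerminating,
// and the caller waits for the stop to finish before retrying.
func exampleAcquireSVM(svmMap *serviceVMMap, id string) (*serviceVM, error) {
	svm, alreadyExists, err := svmMap.add(id)
	if err == errVMisTerminating {
		// The previous instance is still shutting down; wait for it to
		// finish, then retry the acquisition from scratch.
		if stopErr := svm.getStopError(); stopErr != nil {
			return nil, stopErr
		}
		return exampleAcquireSVM(svmMap, id)
	}
	if err != nil {
		return nil, err
	}
	if !alreadyExists {
		// A brand-new entry: the caller would configure and boot the
		// service VM here, then call svm.signalStartFinished(bootErr).
	}
	return svm, nil
}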
// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
//	- entry doesn't exist:
//		- return errVMUnknown
//	- entry does exist but the ref count is 0:
//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for the stop
//	- entry does exist and the ref count is 1:
//		- return the vm and set lastRef to true. The caller can then stop the vm, delete the id from
//		  this map, and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
//	- entry does exist and the ref count is > 1:
//		- just reduce the ref count and return the svm
func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
	svmMap.Lock()
	defer svmMap.Unlock()

	svm, ok := svmMap.svms[id]
	if !ok {
		return nil, false, errVMUnknown
	}
	if svm.refCount == 0 {
		return svm.svm, false, errVMisTerminating
	}
	svm.refCount--
	return svm.svm, svm.refCount == 0, nil
}

// setRefCountZero works the same way as decrementRefCount, but sets the ref count to 0 instead of decrementing it.
func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
	svmMap.Lock()
	defer svmMap.Unlock()

	svm, ok := svmMap.svms[id]
	if !ok {
		return nil, errVMUnknown
	}
	if svm.refCount == 0 {
		return svm.svm, errVMisTerminating
	}
	svm.refCount = 0
	return svm.svm, nil
}

// deleteID deletes the given ID from the map. If the ref count is not 0 or the
// VM does not exist, then this function returns an error.
func (svmMap *serviceVMMap) deleteID(id string) error {
	svmMap.Lock()
	defer svmMap.Unlock()
	svm, ok := svmMap.svms[id]
	if !ok {
		return errVMUnknown
	}
	if svm.refCount != 0 {
		return errVMStillHasReference
	}
	delete(svmMap.svms, id)
	return nil
}

// signalStartFinished records the result of the VM start and unblocks anyone
// waiting in getStartError.
func (svm *serviceVM) signalStartFinished(err error) {
	svm.Lock()
	svm.startError = err
	svm.Unlock()
	close(svm.startStatus)
}

// getStartError blocks until the VM has finished starting and returns the
// start result.
func (svm *serviceVM) getStartError() error {
	<-svm.startStatus
	svm.Lock()
	defer svm.Unlock()
	return svm.startError
}

// signalStopFinished records the result of the VM stop and unblocks anyone
// waiting in getStopError.
func (svm *serviceVM) signalStopFinished(err error) {
	svm.Lock()
	svm.stopError = err
	svm.Unlock()
	close(svm.stopStatus)
}

// getStopError blocks until the VM has finished stopping and returns the
// stop result.
func (svm *serviceVM) getStopError() error {
	<-svm.stopStatus
	svm.Lock()
	defer svm.Unlock()
	return svm.stopError
}

// hotAddVHDs waits for the service vm to start and then attaches the vhds.
func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
	if err := svm.getStartError(); err != nil {
		return err
	}
	return svm.hotAddVHDsAtStart(mvds...)
}

// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
	svm.Lock()
	defer svm.Unlock()
	for i, mvd := range mvds {
		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
			svm.attachedVHDs[mvd.HostPath]++
			continue
		}

		if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
			// Roll back the VHDs attached so far before failing.
			svm.hotRemoveVHDsAtStart(mvds[:i]...)
			return err
		}
		svm.attachedVHDs[mvd.HostPath] = 1
	}
	return nil
}
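// Hypothetical sketch, not part of the original file: hot-adding a single
// read-only layer VHD. The host and container paths are illustrative only;
// hotAddVHDs waits for the VM to start and ref counts repeated attaches of
// the same HostPath.
func exampleHotAddLayer(svm *serviceVM) error {
	mvd := hcsshim.MappedVirtualDisk{
		HostPath:      `C:\lcow\layer.vhdx`, // illustrative path on the Windows host
		ContainerPath: "/tmp/layer",         // where it appears inside the service VM
		ReadOnly:      true,
		AttachOnly:    false, // false: mount it inside the service VM as well
	}
	return svm.hotAddVHDs(mvd)
}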
// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
	if err := svm.getStartError(); err != nil {
		return err
	}
	return svm.hotRemoveVHDsAtStart(mvds...)
}

// hotRemoveVHDsAtStart works the same way as hotRemoveVHDs but does not wait for the VM to start.
func (svm *serviceVM) hotRemoveVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
	svm.Lock()
	defer svm.Unlock()
	var retErr error
	for _, mvd := range mvds {
		if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
			// We continue instead of returning an error if we try to hot remove a non-existent VHD.
			// This is because one of the callers of this function is graphdriver.Put(). Since graphdriver.Get()
			// defers the VM start to the first operation, it's possible that nothing has been hot-added
			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
			// don't find the vhd attached.
			continue
		}

		if svm.attachedVHDs[mvd.HostPath] > 1 {
			svm.attachedVHDs[mvd.HostPath]--
			continue
		}

		// Last reference to this VHD, so remove it from the VM and the map.
		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
			delete(svm.attachedVHDs, mvd.HostPath)
		} else {
			// Take note of the error, but still continue to remove the other VHDs
			logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
			if retErr == nil {
				retErr = err
			}
		}
	}
	return retErr
}

// createExt4VHDX waits for the service vm to start and then creates an
// ext4-formatted VHDX at destFile.
func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
	if err := svm.getStartError(); err != nil {
		return err
	}

	svm.Lock()
	defer svm.Unlock()
	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
}

// createUnionMount creates an overlay union mount named mountName inside the
// service VM from the already-attached disks, or increments its ref count if
// the mount already exists. mvds[0] is the writable scratch (upper) layer
// when it is not read-only; the remaining entries are the read-only lower layers.
func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
	if len(mvds) == 0 {
		return fmt.Errorf("createUnionMount: need at least one layer")
	}

	if err = svm.getStartError(); err != nil {
		return err
	}

	svm.Lock()
	defer svm.Unlock()
	if _, ok := svm.unionMounts[mountName]; ok {
		svm.unionMounts[mountName]++
		return nil
	}

	var lowerLayers []string
	if mvds[0].ReadOnly {
		lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
	}

	for i := 1; i < len(mvds); i++ {
		lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
	}

	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
	if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil {
		return err
	}

	var cmd string
	if mvds[0].ReadOnly {
		// Read-only overlay. Note that overlayfs expects the lowerdir
		// entries to be colon-separated.
		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
			strings.Join(lowerLayers, ":"),
			mountName)
	} else {
		upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
		work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)

		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
			return err
		}

		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
			strings.Join(lowerLayers, ":"),
			upper,
			work,
			mountName)
	}

	logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
	if err = svm.runProcess(cmd, nil, nil, nil); err != nil {
		return err
	}

	svm.unionMounts[mountName] = 1
	return nil
}
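// Hypothetical sketch, not part of the original file: a read-write union
// mount built from a writable scratch disk plus one read-only layer (the
// container paths and mount name are illustrative). Given these arguments,
// createUnionMount would effectively run, inside the service VM:
//
//	mkdir -p /tmp/mnt
//	mkdir -p /tmp/scratch/upper /tmp/scratch/work
//	mount -t overlay overlay -olowerdir=/tmp/layer1,upperdir=/tmp/scratch/upper,workdir=/tmp/scratch/work /tmp/mnt
func exampleUnionMount(svm *serviceVM) error {
	scratch := hcsshim.MappedVirtualDisk{ContainerPath: "/tmp/scratch", ReadOnly: false}
	layer := hcsshim.MappedVirtualDisk{ContainerPath: "/tmp/layer1", ReadOnly: true}
	return svm.createUnionMount("/tmp/mnt", scratch, layer)
}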
// deleteUnionMount unmounts the given union mount inside the service VM once
// its ref count drops to zero; otherwise it just decrements the ref count.
func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
	if err := svm.getStartError(); err != nil {
		return err
	}

	svm.Lock()
	defer svm.Unlock()
	if _, ok := svm.unionMounts[mountName]; !ok {
		return nil
	}

	if svm.unionMounts[mountName] > 1 {
		svm.unionMounts[mountName]--
		return nil
	}

	logrus.Debugf("Removing union mount %s", mountName)
	if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
		return err
	}

	delete(svm.unionMounts, mountName)
	return nil
}

// runProcess runs the given command inside the service VM, wiring up the
// provided standard streams, and waits (up to the utility VM timeout) for it
// to exit. A non-zero exit code is returned as an error.
func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
	process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
	if err != nil {
		return err
	}
	defer process.Close()

	if err := process.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
		return err
	}

	exitCode, err := process.ExitCode()
	if err != nil {
		return err
	}

	if exitCode != 0 {
		return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
	}
	return nil
}
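// Hypothetical sketch, not part of the original file: running a command in
// the service VM and capturing its stdout via runProcess. strings.Builder
// satisfies the io.Writer that runProcess expects.
func exampleCatFile(svm *serviceVM, path string) (string, error) {
	var out strings.Builder
	if err := svm.runProcess("cat "+path, nil, &out, nil); err != nil {
		return "", err
	}
	return out.String(), nil
}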