github.com/kata-containers/runtime@v0.0.0-20210505125100-04f29832a923/virtcontainers/acrn.go

// Copyright (c) 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

package virtcontainers

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"syscall"
	"time"
	"unsafe"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/kata-containers/runtime/virtcontainers/device/config"
	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
	"github.com/kata-containers/runtime/virtcontainers/types"
	"github.com/kata-containers/runtime/virtcontainers/utils"
)

// ACRN uses the store in an unusual way, so the UUID bookkeeping is kept
// here rather than in the store.

// UUIDPathSuffix is the suffix used for uuid storage
const (
	UUIDPathSuffix = "uuid"
	uuidFile       = "uuid.json"
)

// ACRN currently supports only a fixed set of known UUIDs for security
// (FuSa) reasons. When launching a VM, one of these pre-defined UUIDs
// must be used, otherwise the VM launch will fail. The UUID is not used
// for image identification; its main purpose is generating the vSeed
// (a virtual seed that takes the UUID as one of its parameters), which
// is used during VM boot.

// acrnUUIDsToIdx maps each known UUID to its index
var acrnUUIDsToIdx = map[string]uint8{
	"a7ada506-1ab0-4b6b-a0da-e513ca9b8c2f": 0,
	"dbeae168-26e4-4084-9227-622193e56325": 1,
	"18ed60cd-e9ea-4bf4-8f87-8523fc8347a3": 2,
	"3f90b6f8-449a-4e72-b99c-063a889fc422": 3,
	"1ae8587b-e599-4b59-8260-6d14ac166a55": 4,
	"75f3b94b-49ed-48fc-b019-577ef45adf2b": 5,
	"ca62cf3c-8359-47e8-a3f7-de2d682dfb02": 6,
	"e3189497-c3f6-4b97-9e2c-18ac0ab9064d": 7,
}

// acrnIdxToUUIDs maps each index to its UUID
var acrnIdxToUUIDs = map[uint8]string{
	0: "a7ada506-1ab0-4b6b-a0da-e513ca9b8c2f",
	1: "dbeae168-26e4-4084-9227-622193e56325",
	2: "18ed60cd-e9ea-4bf4-8f87-8523fc8347a3",
	3: "3f90b6f8-449a-4e72-b99c-063a889fc422",
	4: "1ae8587b-e599-4b59-8260-6d14ac166a55",
	5: "75f3b94b-49ed-48fc-b019-577ef45adf2b",
	6: "ca62cf3c-8359-47e8-a3f7-de2d682dfb02",
	7: "e3189497-c3f6-4b97-9e2c-18ac0ab9064d",
}

// AcrnInfo keeps track of UUID availability
type AcrnInfo struct {
	UUIDAvailability [8]uint8
}

// AcrnState keeps track of the VM UUID and PID.
type AcrnState struct {
	UUID string
	PID  int
}

// Acrn is a Hypervisor interface implementation for the Linux ACRN hypervisor.
type Acrn struct {
	id         string
	config     HypervisorConfig
	acrnConfig Config
	state      AcrnState
	info       AcrnInfo
	arch       acrnArch
	ctx        context.Context
	store      persistapi.PersistDriver
}

type acrnPlatformInfo struct {
	cpuNum            uint16     //nolint
	reserved0         [126]uint8 //nolint
	maxVCPUsPerVM     uint16     //nolint
	maxKataContainers uint8
	reserved1         [125]uint8 //nolint
}
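// Note: acrnPlatformInfo mirrors the platform-info payload that the ACRN VHM
// driver fills in via ioctl_ACRN_GET_PLATFORM_INFO (see GetMaxSupportedACRNVM
// below). The reserved fields are padding that keeps the struct size aligned
// with what the driver writes; only maxKataContainers is consumed in this file.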
const acrnDevice = "/dev/acrn_vhm"

// ioctl_ACRN_GET_PLATFORM_INFO is the IOCTL used to retrieve platform
// information from ACRN.
// The current mainline Linux kernel does not support ACRN, so several
// macros are not defined in the Linux headers. Until that support is
// available, use the raw value directly instead of the macro.
// https://github.com/kata-containers/runtime/issues/1784
const ioctl_ACRN_GET_PLATFORM_INFO = 0x43000003 //nolint

const (
	acrnConsoleSocket          = "console.sock"
	acrnStopSandboxTimeoutSecs = 15
)

// UUIDBusy marks a particular UUID as busy
const UUIDBusy = 1

// UUIDFree marks a particular UUID as free
const UUIDFree = 0

// agnostic list of kernel parameters
var acrnDefaultKernelParameters = []Param{
	{"panic", "1"},
}

func (a *Acrn) kernelParameters() string {
	// get a list of arch kernel parameters
	params := a.arch.kernelParameters(a.config.Debug)

	// use default parameters
	params = append(params, acrnDefaultKernelParameters...)

	// set the maximum number of vCPUs
	params = append(params, Param{"maxcpus", fmt.Sprintf("%d", a.config.DefaultMaxVCPUs)})

	// add the params specified by the provided config. As the kernel
	// honours the last parameter value set and since the config-provided
	// params are added here, they will take priority over the defaults.
	params = append(params, a.config.KernelParams...)

	paramsStr := SerializeParams(params, "=")

	return strings.Join(paramsStr, " ")
}
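// For illustration only (the values are hypothetical): with Debug disabled and
// DefaultMaxVCPUs set to 4, kernelParameters() yields a command line of the form
//
//	<arch params> panic=1 maxcpus=4 <params from HypervisorConfig.KernelParams>
//
// where later entries win because the kernel honours the last value set for a
// given parameter.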
// capabilities returns all capabilities supported by the Acrn implementation
// of the hypervisor interface
func (a *Acrn) capabilities() types.Capabilities {
	span, _ := a.trace("capabilities")
	defer span.Finish()

	return a.arch.capabilities()
}

func (a *Acrn) hypervisorConfig() HypervisorConfig {
	return a.config
}

// get the acrn binary path
func (a *Acrn) acrnPath() (string, error) {
	p, err := a.config.HypervisorAssetPath()
	if err != nil {
		return "", err
	}

	if p == "" {
		p, err = a.arch.acrnPath()
		if err != nil {
			return "", err
		}
	}

	if _, err = os.Stat(p); os.IsNotExist(err) {
		return "", fmt.Errorf("acrn path (%s) does not exist", p)
	}

	return p, nil
}

// get the acrnctl binary path
func (a *Acrn) acrnctlPath() (string, error) {
	ctlpath, err := a.config.HypervisorCtlAssetPath()
	if err != nil {
		return "", err
	}

	if ctlpath == "" {
		ctlpath, err = a.arch.acrnctlPath()
		if err != nil {
			return "", err
		}
	}

	if _, err = os.Stat(ctlpath); os.IsNotExist(err) {
		return "", fmt.Errorf("acrnctl path (%s) does not exist", ctlpath)
	}

	return ctlpath, nil
}

// Logger returns a logrus logger appropriate for logging acrn messages
func (a *Acrn) Logger() *logrus.Entry {
	return virtLog.WithField("subsystem", "acrn")
}

func (a *Acrn) trace(name string) (opentracing.Span, context.Context) {
	if a.ctx == nil {
		a.Logger().WithField("type", "bug").Error("trace called before context set")
		a.ctx = context.Background()
	}

	span, ctx := opentracing.StartSpanFromContext(a.ctx, name)

	span.SetTag("subsystem", "hypervisor")
	span.SetTag("type", "acrn")

	return span, ctx
}

func (a *Acrn) memoryTopology() (Memory, error) {
	memMb := uint64(a.config.MemorySize)

	return a.arch.memoryTopology(memMb), nil
}

func (a *Acrn) appendImage(devices []Device, imagePath string) ([]Device, error) {
	if imagePath == "" {
		return nil, fmt.Errorf("Image path is empty: %s", imagePath)
	}

	// Get the sandbox and increment the globalIndex.
	// This is to make sure the VM rootfs occupies
	// the first index, which is /dev/vda.
	sandbox, err := globalSandboxList.lookupSandbox(a.id)
	if sandbox == nil && err != nil {
		return nil, err
	}
	if _, err = sandbox.GetAndSetSandboxBlockIndex(); err != nil {
		return nil, err
	}

	devices, err = a.arch.appendImage(devices, imagePath)
	if err != nil {
		return nil, err
	}

	return devices, nil
}

func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
	var devices []Device

	if imagePath == "" {
		return nil, fmt.Errorf("Image Path should not be empty: %s", imagePath)
	}

	console, err := a.getSandboxConsole(a.id)
	if err != nil {
		return nil, err
	}

	// Add bridges before any other devices. This way we make sure that
	// bridge gets the first available PCI address.
	devices = a.arch.appendBridges(devices)

	// Add LPC device to the list of other devices.
	devices = a.arch.appendLPC(devices)

	devices = a.arch.appendConsole(devices, console)

	devices, err = a.appendImage(devices, imagePath)
	if err != nil {
		return nil, err
	}

	// Create virtio-blk devices with a dummy backend as a placeholder
	// for the container rootfs (as acrn doesn't support hot-plug).
	// Once the container rootfs is known, the dummy backend is replaced
	// with the actual path (using the block rescan feature in acrn).
	devices, err = a.createDummyVirtioBlkDev(devices)
	if err != nil {
		return nil, err
	}

	return devices, nil
}

// setup sets the Acrn structure up.
func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
	span, _ := a.trace("setup")
	defer span.Finish()

	err := hypervisorConfig.valid()
	if err != nil {
		return err
	}

	a.id = id
	a.config = *hypervisorConfig
	a.arch = newAcrnArch(a.config)

	var create bool
	var uuid string

	if a.state.UUID == "" {
		create = true
	}

	if create {
		a.Logger().Debug("Setting UUID")
		if uuid, err = a.GetNextAvailableUUID(); err != nil {
			return err
		}
		a.state.UUID = uuid
		Idx := acrnUUIDsToIdx[uuid]
		a.info.UUIDAvailability[Idx] = UUIDBusy

		// The path might already exist, but in case of VM templating,
		// we have to create it since the sandbox has not created it yet.
		if err = os.MkdirAll(filepath.Join(a.store.RunStoragePath(), id), DirMode); err != nil {
			return err
		}

		if err = a.storeInfo(); err != nil {
			return err
		}
	}

	return nil
}

func (a *Acrn) createDummyVirtioBlkDev(devices []Device) ([]Device, error) {
	span, _ := a.trace("createDummyVirtioBlkDev")
	defer span.Finish()

	// Since acrn doesn't support hot-plug, dummy virtio-blk
	// devices are added and later replaced with the container rootfs.
	// Start from driveIndex 1, as 0 is allocated for the VM rootfs.
	for driveIndex := 1; driveIndex <= AcrnBlkDevPoolSz; driveIndex++ {
		drive := config.BlockDrive{
			File:  "nodisk",
			Index: driveIndex,
		}

		devices = a.arch.appendBlockDevice(devices, drive)
	}

	return devices, nil
}
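// Note on the placeholders above: updateBlockDevice() (further below) is what
// eventually replaces a "nodisk" entry once a container rootfs is known. It
// shells out to acrnctl with a command of roughly the form
//
//	acrnctl blkrescan <sandbox-name> <slot>,<path-to-rootfs>
//
// so the real backing file appears behind a virtio-blk slot that was already
// present at boot, which is how the lack of hot-plug support is worked around.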
// createSandbox is the Hypervisor sandbox creation implementation for acrn.
func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
	// Save the tracing context
	a.ctx = ctx

	span, _ := a.trace("createSandbox")
	defer span.Finish()

	if err := a.setup(id, hypervisorConfig); err != nil {
		return err
	}

	memory, err := a.memoryTopology()
	if err != nil {
		return err
	}

	kernelPath, err := a.config.KernelAssetPath()
	if err != nil {
		return err
	}

	imagePath, err := a.config.ImageAssetPath()
	if err != nil {
		return err
	}

	kernel := Kernel{
		Path:      kernelPath,
		ImagePath: imagePath,
		Params:    a.kernelParameters(),
	}

	if a.state.UUID == "" {
		return fmt.Errorf("ACRN UUID should not be empty")
	}

	devices, err := a.buildDevices(imagePath)
	if err != nil {
		return err
	}

	acrnPath, err := a.acrnPath()
	if err != nil {
		return err
	}

	acrnctlPath, err := a.acrnctlPath()
	if err != nil {
		return err
	}

	acrnConfig := Config{
		UUID:     a.state.UUID,
		ACPIVirt: true,
		Path:     acrnPath,
		CtlPath:  acrnctlPath,
		Memory:   memory,
		Devices:  devices,
		Kernel:   kernel,
		Name:     fmt.Sprintf("sandbox-%s", a.id),
	}

	a.acrnConfig = acrnConfig

	return nil
}

// startSandbox will start the Sandbox's VM.
func (a *Acrn) startSandbox(timeoutSecs int) error {
	span, _ := a.trace("startSandbox")
	defer span.Finish()

	if a.config.Debug {
		params := a.arch.kernelParameters(a.config.Debug)
		strParams := SerializeParams(params, "=")
		formatted := strings.Join(strParams, " ")

		// The name of this field matches a similar one generated by
		// the runtime and allows users to identify which parameters
		// are set here, which come from the runtime and which are set
		// by the user.
		a.Logger().WithField("default-kernel-parameters", formatted).Debug()
	}

	vmPath := filepath.Join(a.store.RunVMStoragePath(), a.id)
	err := os.MkdirAll(vmPath, DirMode)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			if err := os.RemoveAll(vmPath); err != nil {
				a.Logger().WithError(err).Error("Failed to clean up vm directory")
			}
		}
	}()

	var strErr string
	var PID int
	PID, strErr, err = LaunchAcrn(a.acrnConfig, virtLog.WithField("subsystem", "acrn-dm"))
	if err != nil {
		return fmt.Errorf("%s", strErr)
	}
	a.state.PID = PID

	if err = a.waitSandbox(timeoutSecs); err != nil {
		a.Logger().WithError(err).Debug("acrn wait failed")
		return err
	}

	return nil
}

// waitSandbox will wait for the Sandbox's VM to be up and running.
func (a *Acrn) waitSandbox(timeoutSecs int) error {
	span, _ := a.trace("waitSandbox")
	defer span.Finish()

	if timeoutSecs < 0 {
		return fmt.Errorf("Invalid timeout %ds", timeoutSecs)
	}

	time.Sleep(time.Duration(timeoutSecs) * time.Second)

	return nil
}
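// Note: waitSandbox does not probe the VM; it simply sleeps for the requested
// number of seconds, so startSandbox returns only after that fixed delay. The
// acrn-dm PID recorded in a.state.PID above is what gets persisted via save()
// and later used by check() and stopSandbox().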
// stopSandbox will stop the Sandbox's VM.
func (a *Acrn) stopSandbox() (err error) {
	span, _ := a.trace("stopSandbox")
	defer span.Finish()

	a.Logger().Info("Stopping acrn VM")

	defer func() {
		if err != nil {
			a.Logger().Info("stopSandbox failed")
		} else {
			a.Logger().Info("acrn VM stopped")
		}
	}()

	// Mark the UUID as free
	uuid := a.state.UUID
	Idx := acrnUUIDsToIdx[uuid]

	if err = a.loadInfo(); err != nil {
		a.Logger().Info("Failed to load UUID availability info")
		return err
	}

	a.info.UUIDAvailability[Idx] = UUIDFree

	if err = a.storeInfo(); err != nil {
		a.Logger().Info("Failed to store UUID availability info")
		return err
	}

	pid := a.state.PID

	// Send a signal to the VM process to try to stop it properly
	if err = syscall.Kill(pid, syscall.SIGINT); err != nil {
		if err == syscall.ESRCH {
			return nil
		}
		a.Logger().Info("Sending signal to stop acrn VM failed")
		return err
	}

	// Wait for the VM process to terminate
	tInit := time.Now()
	for {
		if err = syscall.Kill(pid, syscall.Signal(0)); err != nil {
			a.Logger().Info("acrn VM stopped after sending signal")
			return nil
		}

		if time.Since(tInit).Seconds() >= acrnStopSandboxTimeoutSecs {
			a.Logger().Warnf("VM still running after waiting %ds", acrnStopSandboxTimeoutSecs)
			break
		}

		// Avoid running a too-busy loop
		time.Sleep(time.Duration(50) * time.Millisecond)
	}

	// Try with a hammer now: a SIGKILL should get rid of the VM process.
	return syscall.Kill(pid, syscall.SIGKILL)
}
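// Illustrative sketch, not part of the upstream code path: the UUID release
// performed inline by stopSandbox above could equally be expressed as a small
// helper like the one below (the name releaseUUID is hypothetical).
func (a *Acrn) releaseUUID(uuid string) error {
	// Refresh the availability table from the store before mutating it.
	if err := a.loadInfo(); err != nil {
		return err
	}

	// Mark the slot for this UUID as free again and persist the table.
	a.info.UUIDAvailability[acrnUUIDsToIdx[uuid]] = UUIDFree
	return a.storeInfo()
}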
func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error {
	if drive.File == "" || drive.Index >= AcrnBlkDevPoolSz {
		return fmt.Errorf("Empty filepath or invalid drive index, Drive ID:%s, Drive Index:%d",
			drive.ID, drive.Index)
	}

	slot := AcrnBlkdDevSlot[drive.Index]

	// Explicitly set PCIPath to NULL, so that VirtPath can be used
	drive.PCIPath = vcTypes.PciPath{}

	args := []string{"blkrescan", a.acrnConfig.Name, fmt.Sprintf("%d,%s", slot, drive.File)}

	a.Logger().WithFields(logrus.Fields{
		"drive": drive,
		"path":  a.config.HypervisorCtlPath,
	}).Info("updateBlockDevice with acrnctl path")

	cmd := exec.Command(a.config.HypervisorCtlPath, args...)
	err := cmd.Run()
	if err != nil {
		a.Logger().WithError(err).Error("updating block device with new file path failed")
	}

	return err
}

func (a *Acrn) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
	span, _ := a.trace("hotplugAddDevice")
	defer span.Finish()

	switch devType {
	case blockDev:
		// The drive placeholder has to exist prior to the update
		return nil, a.updateBlockDevice(devInfo.(*config.BlockDrive))
	default:
		return nil, fmt.Errorf("hotplugAddDevice: unsupported device: devInfo:%v, deviceType:%v",
			devInfo, devType)
	}
}

func (a *Acrn) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
	span, _ := a.trace("hotplugRemoveDevice")
	defer span.Finish()

	// Not supported, return success

	return nil, nil
}

func (a *Acrn) pauseSandbox() error {
	span, _ := a.trace("pauseSandbox")
	defer span.Finish()

	// Not supported, return success

	return nil
}

func (a *Acrn) resumeSandbox() error {
	span, _ := a.trace("resumeSandbox")
	defer span.Finish()

	// Not supported, return success

	return nil
}

// addDevice will add extra devices to the acrn command line.
func (a *Acrn) addDevice(devInfo interface{}, devType deviceType) error {
	var err error
	span, _ := a.trace("addDevice")
	defer span.Finish()

	switch v := devInfo.(type) {
	case types.Volume:
		// Not supported, return success
		err = nil
	case types.Socket:
		a.acrnConfig.Devices = a.arch.appendSocket(a.acrnConfig.Devices, v)
	case types.VSock:
		// Not supported, return success
		err = nil
	case Endpoint:
		a.acrnConfig.Devices = a.arch.appendNetwork(a.acrnConfig.Devices, v)
	case config.BlockDrive:
		a.acrnConfig.Devices = a.arch.appendBlockDevice(a.acrnConfig.Devices, v)
	case config.VhostUserDeviceAttrs:
		// Not supported, return success
		err = nil
	case config.VFIODev:
		// Not supported, return success
		err = nil
	default:
		err = nil
		a.Logger().WithField("unknown-device-type", devInfo).Error("Adding device")
	}

	return err
}
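// Note: addDevice only amends a.acrnConfig.Devices; the devices take effect
// when LaunchAcrn builds the acrn-dm command line in startSandbox, so all
// devices must be added before the VM starts. After boot, the only supported
// change is swapping a block device's backing file via hotplugAddDevice /
// updateBlockDevice.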
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
func (a *Acrn) getSandboxConsole(id string) (string, error) {
	span, _ := a.trace("getSandboxConsole")
	defer span.Finish()

	return utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)
}

func (a *Acrn) saveSandbox() error {
	a.Logger().Info("save sandbox")

	// Not supported, return success

	return nil
}

func (a *Acrn) disconnect() {
	span, _ := a.trace("disconnect")
	defer span.Finish()

	// Not supported.
}

func (a *Acrn) getThreadIDs() (vcpuThreadIDs, error) {
	span, _ := a.trace("getThreadIDs")
	defer span.Finish()

	// Not supported. Return success with an empty map.

	return vcpuThreadIDs{}, nil
}

func (a *Acrn) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
	return 0, memoryDevice{}, nil
}

func (a *Acrn) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
	return 0, 0, nil
}

func (a *Acrn) cleanup() error {
	span, _ := a.trace("cleanup")
	defer span.Finish()

	return nil
}

func (a *Acrn) getPids() []int {
	return []int{a.state.PID}
}

func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
	return errors.New("acrn is not supported by VM cache")
}

func (a *Acrn) toGrpc() ([]byte, error) {
	return nil, errors.New("acrn is not supported by VM cache")
}

func (a *Acrn) save() (s persistapi.HypervisorState) {
	s.Pid = a.state.PID
	s.Type = string(AcrnHypervisor)
	s.UUID = a.state.UUID
	return
}

func (a *Acrn) load(s persistapi.HypervisorState) {
	a.state.PID = s.Pid
	a.state.UUID = s.UUID
}

func (a *Acrn) check() error {
	if err := syscall.Kill(a.state.PID, syscall.Signal(0)); err != nil {
		return errors.Wrapf(err, "failed to ping acrn process")
	}

	return nil
}

func (a *Acrn) generateSocket(id string, useVsock bool) (interface{}, error) {
	return generateVMSocket(id, useVsock, a.store.RunVMStoragePath())
}

// GetACRNUUIDBytes returns the UUID bytes used for VM creation
func (a *Acrn) GetACRNUUIDBytes(uid string) (uuid.UUID, error) {
	return uuid.Parse(uid)
}

// GetNextAvailableUUID returns the next available UUID for VM creation.
// If no valid UUIDs are available it returns an error.
func (a *Acrn) GetNextAvailableUUID() (string, error) {
	var MaxVMSupported uint8
	var Idx uint8
	var uuidStr string
	var err error

	if err = a.loadInfo(); err != nil {
		a.Logger().Infof("Load UUID store failed")
	}

	if MaxVMSupported, err = a.GetMaxSupportedACRNVM(); err != nil {
		return "", fmt.Errorf("IOCTL GetMaxSupportedACRNVM failed")
	}

	for Idx = 0; Idx < MaxVMSupported; Idx++ {
		if a.info.UUIDAvailability[Idx] == UUIDFree {
			uuidStr = acrnIdxToUUIDs[Idx]
			break
		}
	}

	if uuidStr == "" {
		return "", fmt.Errorf("Invalid UUID: Max VMs reached")
	}

	return uuidStr, nil
}
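// The availability table consulted above is persisted as uuid.json in the
// store's "uuid" directory (see storeInfo and loadInfo below). As an
// illustration, a file recording that only the first UUID slot is in use
// would contain:
//
//	{"UUIDAvailability":[1,0,0,0,0,0,0,0]}
//
// where 1 is UUIDBusy and 0 is UUIDFree.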
// GetMaxSupportedACRNVM checks the maximum number of VMs that can be
// launched from kata-runtime.
func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) {
	flags := syscall.O_RDWR | syscall.O_CLOEXEC

	f, err := syscall.Open(acrnDevice, flags, 0)
	if err != nil {
		return 0, err
	}
	defer syscall.Close(f)

	var platformInfo acrnPlatformInfo

	ret, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
		uintptr(f),
		uintptr(ioctl_ACRN_GET_PLATFORM_INFO),
		uintptr(unsafe.Pointer(&platformInfo)))
	if ret != 0 || errno != 0 {
		return 0, errno
	}

	return platformInfo.maxKataContainers, nil
}

func (a *Acrn) storeInfo() error {
	relPath := filepath.Join(UUIDPathSuffix, uuidFile)

	jsonOut, err := json.Marshal(a.info)
	if err != nil {
		return fmt.Errorf("Could not marshal data: %s", err)
	}

	if err := a.store.GlobalWrite(relPath, jsonOut); err != nil {
		return fmt.Errorf("failed to write uuid to file: %v", err)
	}

	return nil
}

func (a *Acrn) loadInfo() error {
	relPath := filepath.Join(UUIDPathSuffix, uuidFile)

	data, err := a.store.GlobalRead(relPath)
	if err != nil {
		return fmt.Errorf("failed to read uuid from file: %v", err)
	}

	if err := json.Unmarshal(data, &a.info); err != nil {
		return fmt.Errorf("failed to unmarshal uuid info: %v", err)
	}
	return nil
}

func (a *Acrn) getVirtDriveOffset() int {
	return 0
}