github.com/containers/podman/v4@v4.9.4/pkg/machine/qemu/machine.go (about) 1 //go:build amd64 || arm64 2 // +build amd64 arm64 3 4 package qemu 5 6 import ( 7 "bufio" 8 "bytes" 9 "encoding/base64" 10 "encoding/json" 11 "errors" 12 "fmt" 13 "io/fs" 14 "net" 15 "os" 16 "os/exec" 17 "os/signal" 18 "path/filepath" 19 "strconv" 20 "strings" 21 "syscall" 22 "time" 23 24 "github.com/containers/common/pkg/config" 25 gvproxy "github.com/containers/gvisor-tap-vsock/pkg/types" 26 "github.com/containers/podman/v4/pkg/machine" 27 "github.com/containers/podman/v4/pkg/machine/define" 28 "github.com/containers/podman/v4/pkg/rootless" 29 "github.com/containers/podman/v4/pkg/util" 30 "github.com/containers/storage/pkg/lockfile" 31 "github.com/digitalocean/go-qemu/qmp" 32 "github.com/sirupsen/logrus" 33 ) 34 35 var ( 36 // vmtype refers to qemu (vs libvirt, krun, etc). 37 // Could this be moved into Provider 38 vmtype = machine.QemuVirt 39 ) 40 41 const ( 42 VolumeTypeVirtfs = "virtfs" 43 MountType9p = "9p" 44 dockerSock = "/var/run/docker.sock" 45 dockerConnectTimeout = 5 * time.Second 46 ) 47 48 // qemuReadyUnit is a unit file that sets up the virtual serial device 49 // where when the VM is done configuring, it will send an ack 50 // so a listening host tknows it can begin interacting with it 51 const qemuReadyUnit = `[Unit] 52 Requires=dev-virtio\\x2dports-%s.device 53 After=remove-moby.service sshd.socket sshd.service 54 After=systemd-user-sessions.service 55 OnFailure=emergency.target 56 OnFailureJobMode=isolate 57 [Service] 58 Type=oneshot 59 RemainAfterExit=yes 60 ExecStart=/bin/sh -c '/usr/bin/echo Ready >/dev/%s' 61 [Install] 62 RequiredBy=default.target 63 ` 64 65 type MachineVM struct { 66 // ConfigPath is the path to the configuration file 67 ConfigPath define.VMFile 68 // The command line representation of the qemu command 69 CmdLine QemuCmd 70 // HostUser contains info about host user 71 machine.HostUser 72 // ImageConfig describes the bootable image 73 machine.ImageConfig 74 // Mounts is the list of remote filesystems to mount 75 Mounts []machine.Mount 76 // Name of VM 77 Name string 78 // PidFilePath is the where the Proxy PID file lives 79 PidFilePath define.VMFile 80 // VMPidFilePath is the where the VM PID file lives 81 VMPidFilePath define.VMFile 82 // QMPMonitor is the qemu monitor object for sending commands 83 QMPMonitor Monitor 84 // ReadySocket tells host when vm is booted 85 ReadySocket define.VMFile 86 // ResourceConfig is physical attrs of the VM 87 machine.ResourceConfig 88 // SSHConfig for accessing the remote vm 89 machine.SSHConfig 90 // Starting tells us whether the machine is running or if we have just dialed it to start it 91 Starting bool 92 // Created contains the original created time instead of querying the file mod time 93 Created time.Time 94 // LastUp contains the last recorded uptime 95 LastUp time.Time 96 97 // User at runtime for serializing write operations. 98 lock *lockfile.LockFile 99 } 100 101 type Monitor struct { 102 // Address portion of the qmp monitor (/tmp/tmp.sock) 103 Address define.VMFile 104 // Network portion of the qmp monitor (unix) 105 Network string 106 // Timeout in seconds for qmp monitor transactions 107 Timeout time.Duration 108 } 109 110 // migrateVM takes the old configuration structure and migrates it 111 // to the new structure and writes it to the filesystem 112 func migrateVM(configPath string, config []byte, vm *MachineVM) error { 113 fmt.Printf("Migrating machine %q\n", vm.Name) 114 var old MachineVMV1 115 err := json.Unmarshal(config, &old) 116 if err != nil { 117 return err 118 } 119 // Looks like we loaded the older structure; now we need to migrate 120 // from the old structure to the new structure 121 _, pidFile, err := vm.getSocketandPid() 122 if err != nil { 123 return err 124 } 125 126 pidFilePath := define.VMFile{Path: pidFile} 127 qmpMonitor := Monitor{ 128 Address: define.VMFile{Path: old.QMPMonitor.Address}, 129 Network: old.QMPMonitor.Network, 130 Timeout: old.QMPMonitor.Timeout, 131 } 132 socketPath, err := getRuntimeDir() 133 if err != nil { 134 return err 135 } 136 virtualSocketPath := filepath.Join(socketPath, "podman", vm.Name+"_ready.sock") 137 readySocket := define.VMFile{Path: virtualSocketPath} 138 139 vm.HostUser = machine.HostUser{} 140 vm.ImageConfig = machine.ImageConfig{} 141 vm.ResourceConfig = machine.ResourceConfig{} 142 vm.SSHConfig = machine.SSHConfig{} 143 144 ignitionFilePath, err := define.NewMachineFile(old.IgnitionFilePath, nil) 145 if err != nil { 146 return err 147 } 148 imagePath, err := define.NewMachineFile(old.ImagePath, nil) 149 if err != nil { 150 return err 151 } 152 153 // setReadySocket will stick the entry into the new struct 154 symlink := vm.Name + "_ready.sock" 155 if err := machine.SetSocket(&vm.ReadySocket, machine.ReadySocketPath(socketPath+"/podman/", vm.Name), &symlink); err != nil { 156 return err 157 } 158 159 vm.CPUs = old.CPUs 160 vm.CmdLine = old.CmdLine 161 vm.DiskSize = old.DiskSize 162 vm.IdentityPath = old.IdentityPath 163 vm.IgnitionFile = *ignitionFilePath 164 vm.ImagePath = *imagePath 165 vm.ImageStream = old.ImageStream 166 vm.Memory = old.Memory 167 vm.Mounts = old.Mounts 168 vm.Name = old.Name 169 vm.PidFilePath = pidFilePath 170 vm.Port = old.Port 171 vm.QMPMonitor = qmpMonitor 172 vm.ReadySocket = readySocket 173 vm.RemoteUsername = old.RemoteUsername 174 vm.Rootful = old.Rootful 175 vm.UID = old.UID 176 177 // Back up the original config file 178 if err := os.Rename(configPath, configPath+".orig"); err != nil { 179 return err 180 } 181 // Write the config file 182 if err := vm.writeConfig(); err != nil { 183 // If the config file fails to be written, put the original 184 // config file back before erroring 185 if renameError := os.Rename(configPath+".orig", configPath); renameError != nil { 186 logrus.Warn(renameError) 187 } 188 return err 189 } 190 // Remove the backup file 191 return os.Remove(configPath + ".orig") 192 } 193 194 // addMountsToVM converts the volumes passed through the CLI into the specified 195 // volume driver and adds them to the machine 196 func (v *MachineVM) addMountsToVM(opts machine.InitOptions) error { 197 var volumeType string 198 switch opts.VolumeDriver { 199 // "" is the default volume driver 200 case "virtfs", "": 201 volumeType = VolumeTypeVirtfs 202 default: 203 return fmt.Errorf("unknown volume driver: %s", opts.VolumeDriver) 204 } 205 206 mounts := []machine.Mount{} 207 for i, volume := range opts.Volumes { 208 tag := fmt.Sprintf("vol%d", i) 209 paths := pathsFromVolume(volume) 210 source := extractSourcePath(paths) 211 target := extractTargetPath(paths) 212 readonly, securityModel := extractMountOptions(paths) 213 if volumeType == VolumeTypeVirtfs { 214 v.CmdLine.SetVirtfsMount(source, tag, securityModel, readonly) 215 mounts = append(mounts, machine.Mount{Type: MountType9p, Tag: tag, Source: source, Target: target, ReadOnly: readonly}) 216 } 217 } 218 v.Mounts = mounts 219 return nil 220 } 221 222 // Init writes the json configuration file to the filesystem for 223 // other verbs (start, stop) 224 func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) { 225 var ( 226 key string 227 err error 228 ) 229 230 // cleanup half-baked files if init fails at any point 231 callbackFuncs := machine.InitCleanup() 232 defer callbackFuncs.CleanIfErr(&err) 233 go callbackFuncs.CleanOnSignal() 234 235 v.IdentityPath = util.GetIdentityPath(v.Name) 236 v.Rootful = opts.Rootful 237 238 imagePath, strm, err := machine.Pull(opts.ImagePath, opts.Name, VirtualizationProvider()) 239 if err != nil { 240 return false, err 241 } 242 243 // By this time, image should be had and uncompressed 244 callbackFuncs.Add(imagePath.Delete) 245 246 // Assign values about the download 247 v.ImagePath = *imagePath 248 v.ImageStream = strm.String() 249 250 if err = v.addMountsToVM(opts); err != nil { 251 return false, err 252 } 253 254 v.UID = os.Getuid() 255 256 // Add location of bootable image 257 v.CmdLine.SetBootableImage(v.getImageFile()) 258 259 if err = machine.AddSSHConnectionsToPodmanSocket( 260 v.UID, 261 v.Port, 262 v.IdentityPath, 263 v.Name, 264 v.RemoteUsername, 265 opts, 266 ); err != nil { 267 return false, err 268 } 269 callbackFuncs.Add(v.removeSystemConnections) 270 271 // Write the JSON file 272 if err = v.writeConfig(); err != nil { 273 return false, fmt.Errorf("writing JSON file: %w", err) 274 } 275 callbackFuncs.Add(v.ConfigPath.Delete) 276 277 // User has provided ignition file so keygen 278 // will be skipped. 279 if len(opts.IgnitionPath) < 1 { 280 key, err = machine.CreateSSHKeys(v.IdentityPath) 281 if err != nil { 282 return false, err 283 } 284 callbackFuncs.Add(v.removeSSHKeys) 285 } 286 // Run arch specific things that need to be done 287 if err = v.prepare(); err != nil { 288 return false, err 289 } 290 originalDiskSize, err := getDiskSize(v.getImageFile()) 291 if err != nil { 292 return false, err 293 } 294 295 if err = v.resizeDisk(opts.DiskSize, originalDiskSize>>(10*3)); err != nil { 296 return false, err 297 } 298 299 if opts.UserModeNetworking != nil && !*opts.UserModeNetworking { 300 logrus.Warn("ignoring init option to disable user-mode networking: this mode is not supported by the QEMU backend") 301 } 302 303 builder := machine.NewIgnitionBuilder(machine.DynamicIgnition{ 304 Name: opts.Username, 305 Key: key, 306 VMName: v.Name, 307 VMType: machine.QemuVirt, 308 TimeZone: opts.TimeZone, 309 WritePath: v.getIgnitionFile(), 310 UID: v.UID, 311 Rootful: v.Rootful, 312 NetRecover: useNetworkRecover(), 313 }) 314 315 // If the user provides an ignition file, we need to 316 // copy it into the conf dir 317 if len(opts.IgnitionPath) > 0 { 318 return false, builder.BuildWithIgnitionFile(opts.IgnitionPath) 319 } 320 321 if err := builder.GenerateIgnitionConfig(); err != nil { 322 return false, err 323 } 324 325 readyUnit := machine.Unit{ 326 Enabled: machine.BoolToPtr(true), 327 Name: "ready.service", 328 Contents: machine.StrToPtr(fmt.Sprintf(qemuReadyUnit, "vport1p1", "vport1p1")), 329 } 330 builder.WithUnit(readyUnit) 331 332 err = builder.Build() 333 callbackFuncs.Add(v.IgnitionFile.Delete) 334 335 return err == nil, err 336 } 337 338 func (v *MachineVM) removeSSHKeys() error { 339 if err := os.Remove(fmt.Sprintf("%s.pub", v.IdentityPath)); err != nil { 340 logrus.Error(err) 341 } 342 return os.Remove(v.IdentityPath) 343 } 344 345 func (v *MachineVM) removeSystemConnections() error { 346 return machine.RemoveConnections(v.Name, fmt.Sprintf("%s-root", v.Name)) 347 } 348 349 func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) { 350 // If one setting fails to be applied, the others settings will not fail and still be applied. 351 // The setting(s) that failed to be applied will have its errors returned in setErrors 352 var setErrors []error 353 354 v.lock.Lock() 355 defer v.lock.Unlock() 356 357 state, err := v.State(false) 358 if err != nil { 359 return setErrors, err 360 } 361 362 if state == machine.Running { 363 suffix := "" 364 if v.Name != machine.DefaultMachineName { 365 suffix = " " + v.Name 366 } 367 return setErrors, fmt.Errorf("cannot change settings while the vm is running, run 'podman machine stop%s' first", suffix) 368 } 369 370 if opts.Rootful != nil && v.Rootful != *opts.Rootful { 371 if err := v.setRootful(*opts.Rootful); err != nil { 372 setErrors = append(setErrors, fmt.Errorf("failed to set rootful option: %w", err)) 373 } else { 374 v.Rootful = *opts.Rootful 375 } 376 } 377 378 if opts.CPUs != nil && v.CPUs != *opts.CPUs { 379 v.CPUs = *opts.CPUs 380 v.editCmdLine("-smp", strconv.Itoa(int(v.CPUs))) 381 } 382 383 if opts.Memory != nil && v.Memory != *opts.Memory { 384 v.Memory = *opts.Memory 385 v.editCmdLine("-m", strconv.Itoa(int(v.Memory))) 386 } 387 388 if opts.DiskSize != nil && v.DiskSize != *opts.DiskSize { 389 if err := v.resizeDisk(*opts.DiskSize, v.DiskSize); err != nil { 390 setErrors = append(setErrors, fmt.Errorf("failed to resize disk: %w", err)) 391 } else { 392 v.DiskSize = *opts.DiskSize 393 } 394 } 395 396 if opts.USBs != nil { 397 if usbConfigs, err := parseUSBs(*opts.USBs); err != nil { 398 setErrors = append(setErrors, fmt.Errorf("failed to set usb: %w", err)) 399 } else { 400 v.USBs = usbConfigs 401 } 402 } 403 404 err = v.writeConfig() 405 if err != nil { 406 setErrors = append(setErrors, err) 407 } 408 409 if len(setErrors) > 0 { 410 return setErrors, setErrors[0] 411 } 412 413 return setErrors, nil 414 } 415 416 // mountVolumesToVM iterates through the machine's volumes and mounts them to the 417 // machine 418 func (v *MachineVM) mountVolumesToVM(opts machine.StartOptions, name string) error { 419 for _, mount := range v.Mounts { 420 if !opts.Quiet { 421 fmt.Printf("Mounting volume... %s:%s\n", mount.Source, mount.Target) 422 } 423 // create mountpoint directory if it doesn't exist 424 // because / is immutable, we have to monkey around with permissions 425 // if we dont mount in /home or /mnt 426 args := []string{"-q", "--"} 427 if !strings.HasPrefix(mount.Target, "/home") && !strings.HasPrefix(mount.Target, "/mnt") { 428 args = append(args, "sudo", "chattr", "-i", "/", ";") 429 } 430 args = append(args, "sudo", "mkdir", "-p", mount.Target) 431 if !strings.HasPrefix(mount.Target, "/home") && !strings.HasPrefix(mount.Target, "/mnt") { 432 args = append(args, ";", "sudo", "chattr", "+i", "/", ";") 433 } 434 err := v.SSH(name, machine.SSHOptions{Args: args}) 435 if err != nil { 436 return err 437 } 438 switch mount.Type { 439 case MountType9p: 440 mountOptions := []string{"-t", "9p"} 441 mountOptions = append(mountOptions, []string{"-o", "trans=virtio", mount.Tag, mount.Target}...) 442 mountOptions = append(mountOptions, []string{"-o", "version=9p2000.L,msize=131072"}...) 443 if mount.ReadOnly { 444 mountOptions = append(mountOptions, []string{"-o", "ro"}...) 445 } 446 err = v.SSH(name, machine.SSHOptions{Args: append([]string{"-q", "--", "sudo", "mount"}, mountOptions...)}) 447 if err != nil { 448 return err 449 } 450 default: 451 return fmt.Errorf("unknown mount type: %s", mount.Type) 452 } 453 } 454 return nil 455 } 456 457 // conductVMReadinessCheck checks to make sure the machine is in the proper state 458 // and that SSH is up and running 459 func (v *MachineVM) conductVMReadinessCheck(name string, maxBackoffs int, backoff time.Duration) (connected bool, sshError error, err error) { 460 for i := 0; i < maxBackoffs; i++ { 461 if i > 0 { 462 time.Sleep(backoff) 463 backoff *= 2 464 } 465 state, err := v.State(true) 466 if err != nil { 467 return false, nil, err 468 } 469 if state == machine.Running && v.isListening() { 470 // Also make sure that SSH is up and running. The 471 // ready service's dependencies don't fully make sure 472 // that clients can SSH into the machine immediately 473 // after boot. 474 // 475 // CoreOS users have reported the same observation but 476 // the underlying source of the issue remains unknown. 477 if sshError = v.SSH(name, machine.SSHOptions{Args: []string{"true"}}); sshError != nil { 478 logrus.Debugf("SSH readiness check for machine failed: %v", sshError) 479 continue 480 } 481 connected = true 482 break 483 } 484 } 485 return 486 } 487 488 // runStartVMCommand executes the command to start the VM 489 func runStartVMCommand(cmd *exec.Cmd) error { 490 err := cmd.Start() 491 if err != nil { 492 // check if qemu was not found 493 if !errors.Is(err, os.ErrNotExist) { 494 return err 495 } 496 // look up qemu again maybe the path was changed, https://github.com/containers/podman/issues/13394 497 cfg, err := config.Default() 498 if err != nil { 499 return err 500 } 501 qemuBinaryPath, err := cfg.FindHelperBinary(QemuCommand, true) 502 if err != nil { 503 return err 504 } 505 cmd.Path = qemuBinaryPath 506 err = cmd.Start() 507 if err != nil { 508 return fmt.Errorf("unable to execute %q: %w", cmd, err) 509 } 510 } 511 512 return nil 513 } 514 515 // qemuPid returns -1 or the PID of the running QEMU instance. 516 func (v *MachineVM) qemuPid() (int, error) { 517 pidData, err := os.ReadFile(v.VMPidFilePath.GetPath()) 518 if err != nil { 519 // The file may not yet exist on start or have already been 520 // cleaned up after stop, so we need to be defensive. 521 if errors.Is(err, os.ErrNotExist) { 522 return -1, nil 523 } 524 return -1, err 525 } 526 if len(pidData) == 0 { 527 return -1, nil 528 } 529 530 pid, err := strconv.Atoi(strings.TrimRight(string(pidData), "\n")) 531 if err != nil { 532 logrus.Warnf("Reading QEMU pidfile: %v", err) 533 return -1, nil 534 } 535 return findProcess(pid) 536 } 537 538 // Start executes the qemu command line and forks it 539 func (v *MachineVM) Start(name string, opts machine.StartOptions) error { 540 var ( 541 conn net.Conn 542 err error 543 qemuSocketConn net.Conn 544 ) 545 546 defaultBackoff := 500 * time.Millisecond 547 maxBackoffs := 6 548 549 v.lock.Lock() 550 defer v.lock.Unlock() 551 552 state, err := v.State(false) 553 if err != nil { 554 return err 555 } 556 switch state { 557 case machine.Starting: 558 return fmt.Errorf("cannot start VM %q: starting state indicates that a previous start has failed: please stop and restart the VM", v.Name) 559 case machine.Running: 560 return fmt.Errorf("cannot start VM %q: %w", v.Name, machine.ErrVMAlreadyRunning) 561 } 562 563 // If QEMU is running already, something went wrong and we cannot 564 // proceed. 565 qemuPid, err := v.qemuPid() 566 if err != nil { 567 return err 568 } 569 if qemuPid != -1 { 570 return fmt.Errorf("cannot start VM %q: another instance of %q is already running with process ID %d: please stop and restart the VM", v.Name, v.CmdLine[0], qemuPid) 571 } 572 573 v.Starting = true 574 if err := v.writeConfig(); err != nil { 575 return fmt.Errorf("writing JSON file: %w", err) 576 } 577 doneStarting := func() { 578 v.Starting = false 579 if err := v.writeConfig(); err != nil { 580 logrus.Errorf("Writing JSON file: %v", err) 581 } 582 } 583 defer doneStarting() 584 585 c := make(chan os.Signal, 1) 586 signal.Notify(c, os.Interrupt, syscall.SIGTERM) 587 go func() { 588 _, ok := <-c 589 if !ok { 590 return 591 } 592 doneStarting() 593 os.Exit(1) 594 }() 595 defer close(c) 596 597 if v.isIncompatible() { 598 logrus.Errorf("machine %q is incompatible with this release of podman and needs to be recreated, starting for recovery only", v.Name) 599 } 600 601 forwardSock, forwardState, err := v.startHostNetworking() 602 if err != nil { 603 return fmt.Errorf("unable to start host networking: %q", err) 604 } 605 606 rtPath, err := getRuntimeDir() 607 if err != nil { 608 return err 609 } 610 611 // If the temporary podman dir is not created, create it 612 podmanTempDir := filepath.Join(rtPath, "podman") 613 if _, err := os.Stat(podmanTempDir); errors.Is(err, fs.ErrNotExist) { 614 if mkdirErr := os.MkdirAll(podmanTempDir, 0755); mkdirErr != nil { 615 return err 616 } 617 } 618 619 // If the qemusocketpath exists and the vm is off/down, we should rm 620 // it before the dial as to avoid a segv 621 if err := v.QMPMonitor.Address.Delete(); err != nil { 622 return err 623 } 624 625 qemuSocketConn, err = machine.DialSocketWithBackoffs(maxBackoffs, defaultBackoff, v.QMPMonitor.Address.Path) 626 if err != nil { 627 return err 628 } 629 defer qemuSocketConn.Close() 630 631 fd, err := qemuSocketConn.(*net.UnixConn).File() 632 if err != nil { 633 return err 634 } 635 defer fd.Close() 636 637 dnr, dnw, err := machine.GetDevNullFiles() 638 if err != nil { 639 return err 640 } 641 defer dnr.Close() 642 defer dnw.Close() 643 644 attr := new(os.ProcAttr) 645 files := []*os.File{dnr, dnw, dnw, fd} 646 attr.Files = files 647 cmdLine := v.CmdLine 648 649 cmdLine.SetPropagatedHostEnvs() 650 651 // Disable graphic window when not in debug mode 652 // Done in start, so we're not suck with the debug level we used on init 653 if !logrus.IsLevelEnabled(logrus.DebugLevel) { 654 cmdLine.SetDisplay("none") 655 } 656 657 logrus.Debugf("qemu cmd: %v", cmdLine) 658 659 stderrBuf := &bytes.Buffer{} 660 661 // actually run the command that starts the virtual machine 662 cmd := &exec.Cmd{ 663 Args: cmdLine, 664 Path: cmdLine[0], 665 Stdin: dnr, 666 Stdout: dnw, 667 Stderr: stderrBuf, 668 ExtraFiles: []*os.File{fd}, 669 } 670 671 if err := runStartVMCommand(cmd); err != nil { 672 return err 673 } 674 defer cmd.Process.Release() //nolint:errcheck 675 676 if !opts.Quiet { 677 fmt.Println("Waiting for VM ...") 678 } 679 680 conn, err = machine.DialSocketWithBackoffsAndProcCheck(maxBackoffs, defaultBackoff, v.ReadySocket.GetPath(), checkProcessStatus, "qemu", cmd.Process.Pid, stderrBuf) 681 if err != nil { 682 return err 683 } 684 defer conn.Close() 685 686 _, err = bufio.NewReader(conn).ReadString('\n') 687 if err != nil { 688 return err 689 } 690 691 // update the podman/docker socket service if the host user has been modified at all (UID or Rootful) 692 if v.HostUser.Modified { 693 if machine.UpdatePodmanDockerSockService(v, name, v.UID, v.Rootful) == nil { 694 // Reset modification state if there are no errors, otherwise ignore errors 695 // which are already logged 696 v.HostUser.Modified = false 697 _ = v.writeConfig() 698 } 699 } 700 701 if len(v.Mounts) == 0 { 702 machine.WaitAPIAndPrintInfo( 703 forwardState, 704 v.Name, 705 findClaimHelper(), 706 forwardSock, 707 opts.NoInfo, 708 v.isIncompatible(), 709 v.Rootful, 710 ) 711 return nil 712 } 713 714 connected, sshError, err := v.conductVMReadinessCheck(name, maxBackoffs, defaultBackoff) 715 if err != nil { 716 return err 717 } 718 719 if !connected { 720 msg := "machine did not transition into running state" 721 if sshError != nil { 722 return fmt.Errorf("%s: ssh error: %v", msg, sshError) 723 } 724 return errors.New(msg) 725 } 726 727 // mount the volumes to the VM 728 if err := v.mountVolumesToVM(opts, name); err != nil { 729 return err 730 } 731 732 machine.WaitAPIAndPrintInfo( 733 forwardState, 734 v.Name, 735 findClaimHelper(), 736 forwardSock, 737 opts.NoInfo, 738 v.isIncompatible(), 739 v.Rootful, 740 ) 741 return nil 742 } 743 744 // propagateHostEnv is here for providing the ability to propagate 745 // proxy and SSL settings (e.g. HTTP_PROXY and others) on a start 746 // and avoid a need of re-creating/re-initiating a VM 747 func propagateHostEnv(cmdLine QemuCmd) QemuCmd { 748 varsToPropagate := make([]string, 0) 749 750 for k, v := range machine.GetProxyVariables() { 751 varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", k, v)) 752 } 753 754 if sslCertFile, ok := os.LookupEnv("SSL_CERT_FILE"); ok { 755 pathInVM := filepath.Join(machine.UserCertsTargetPath, filepath.Base(sslCertFile)) 756 varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_FILE", pathInVM)) 757 } 758 759 if _, ok := os.LookupEnv("SSL_CERT_DIR"); ok { 760 varsToPropagate = append(varsToPropagate, fmt.Sprintf("%s=%q", "SSL_CERT_DIR", machine.UserCertsTargetPath)) 761 } 762 763 if len(varsToPropagate) > 0 { 764 prefix := "name=opt/com.coreos/environment,string=" 765 envVarsJoined := strings.Join(varsToPropagate, "|") 766 fwCfgArg := prefix + base64.StdEncoding.EncodeToString([]byte(envVarsJoined)) 767 return append(cmdLine, "-fw_cfg", fwCfgArg) 768 } 769 770 return cmdLine 771 } 772 773 func (v *MachineVM) checkStatus(monitor *qmp.SocketMonitor) (machine.Status, error) { 774 // this is the format returned from the monitor 775 // {"return": {"status": "running", "singlestep": false, "running": true}} 776 777 type statusDetails struct { 778 Status string `json:"status"` 779 Step bool `json:"singlestep"` 780 Running bool `json:"running"` 781 Starting bool `json:"starting"` 782 } 783 type statusResponse struct { 784 Response statusDetails `json:"return"` 785 } 786 var response statusResponse 787 788 checkCommand := struct { 789 Execute string `json:"execute"` 790 }{ 791 Execute: "query-status", 792 } 793 input, err := json.Marshal(checkCommand) 794 if err != nil { 795 return "", err 796 } 797 b, err := monitor.Run(input) 798 if err != nil { 799 if errors.Is(err, os.ErrNotExist) { 800 return machine.Stopped, nil 801 } 802 return "", err 803 } 804 if err := json.Unmarshal(b, &response); err != nil { 805 return "", err 806 } 807 if response.Response.Status == machine.Running { 808 return machine.Running, nil 809 } 810 return machine.Stopped, nil 811 } 812 813 // waitForMachineToStop waits for the machine to stop running 814 func (v *MachineVM) waitForMachineToStop() error { 815 fmt.Println("Waiting for VM to stop running...") 816 waitInternal := 250 * time.Millisecond 817 for i := 0; i < 5; i++ { 818 state, err := v.State(false) 819 if err != nil { 820 return err 821 } 822 if state != machine.Running { 823 break 824 } 825 time.Sleep(waitInternal) 826 waitInternal *= 2 827 } 828 // after the machine stops running it normally takes about 1 second for the 829 // qemu VM to exit so we wait a bit to try to avoid issues 830 time.Sleep(2 * time.Second) 831 return nil 832 } 833 834 // ProxyPID retrieves the pid from the proxy pidfile 835 func (v *MachineVM) ProxyPID() (int, error) { 836 if _, err := os.Stat(v.PidFilePath.Path); errors.Is(err, fs.ErrNotExist) { 837 return -1, nil 838 } 839 proxyPidString, err := v.PidFilePath.Read() 840 if err != nil { 841 return -1, err 842 } 843 proxyPid, err := strconv.Atoi(string(proxyPidString)) 844 if err != nil { 845 return -1, err 846 } 847 return proxyPid, nil 848 } 849 850 // cleanupVMProxyProcess kills the proxy process and removes the VM's pidfile 851 func (v *MachineVM) cleanupVMProxyProcess(proxyProc *os.Process) error { 852 // Kill the process 853 if err := proxyProc.Kill(); err != nil { 854 return err 855 } 856 // Remove the pidfile 857 if err := v.PidFilePath.Delete(); err != nil { 858 return err 859 } 860 return nil 861 } 862 863 // VMPid retrieves the pid from the VM's pidfile 864 func (v *MachineVM) VMPid() (int, error) { 865 vmPidString, err := v.VMPidFilePath.Read() 866 if err != nil { 867 return -1, err 868 } 869 vmPid, err := strconv.Atoi(strings.TrimSpace(string(vmPidString))) 870 if err != nil { 871 return -1, err 872 } 873 874 return vmPid, nil 875 } 876 877 // Stop uses the qmp monitor to call a system_powerdown 878 func (v *MachineVM) Stop(_ string, _ machine.StopOptions) error { 879 v.lock.Lock() 880 defer v.lock.Unlock() 881 882 if err := v.update(); err != nil { 883 return err 884 } 885 886 stopErr := v.stopLocked() 887 888 // Make sure that the associated QEMU process gets killed in case it's 889 // still running (#16054). 890 qemuPid, err := v.qemuPid() 891 if err != nil { 892 if stopErr == nil { 893 return err 894 } 895 return fmt.Errorf("%w: %w", stopErr, err) 896 } 897 898 if qemuPid == -1 { 899 return stopErr 900 } 901 902 if err := sigKill(qemuPid); err != nil { 903 if stopErr == nil { 904 return err 905 } 906 return fmt.Errorf("%w: %w", stopErr, err) 907 } 908 909 return stopErr 910 } 911 912 // stopLocked stops the machine and expects the caller to hold the machine's lock. 913 func (v *MachineVM) stopLocked() error { 914 // check if the qmp socket is there. if not, qemu instance is gone 915 if _, err := os.Stat(v.QMPMonitor.Address.GetPath()); errors.Is(err, fs.ErrNotExist) { 916 // Right now it is NOT an error to stop a stopped machine 917 logrus.Debugf("QMP monitor socket %v does not exist", v.QMPMonitor.Address) 918 // Fix incorrect starting state in case of crash during start 919 if v.Starting { 920 v.Starting = false 921 if err := v.writeConfig(); err != nil { 922 return fmt.Errorf("writing JSON file: %w", err) 923 } 924 } 925 return nil 926 } 927 928 qmpMonitor, err := qmp.NewSocketMonitor(v.QMPMonitor.Network, v.QMPMonitor.Address.GetPath(), v.QMPMonitor.Timeout) 929 if err != nil { 930 return err 931 } 932 // Simple JSON formation for the QAPI 933 stopCommand := struct { 934 Execute string `json:"execute"` 935 }{ 936 Execute: "system_powerdown", 937 } 938 939 input, err := json.Marshal(stopCommand) 940 if err != nil { 941 return err 942 } 943 944 if err := qmpMonitor.Connect(); err != nil { 945 return err 946 } 947 948 var disconnected bool 949 defer func() { 950 if !disconnected { 951 if err := qmpMonitor.Disconnect(); err != nil { 952 logrus.Error(err) 953 } 954 } 955 }() 956 957 if _, err = qmpMonitor.Run(input); err != nil { 958 return err 959 } 960 961 proxyPid, err := v.ProxyPID() 962 if err != nil || proxyPid < 0 { 963 // may return nil if proxyPid == -1 because the pidfile does not exist 964 return err 965 } 966 967 proxyProc, err := os.FindProcess(proxyPid) 968 if proxyProc == nil && err != nil { 969 return err 970 } 971 972 v.LastUp = time.Now() 973 if err := v.writeConfig(); err != nil { // keep track of last up 974 return err 975 } 976 977 if err := v.cleanupVMProxyProcess(proxyProc); err != nil { 978 return err 979 } 980 981 // Remove socket 982 if err := v.QMPMonitor.Address.Delete(); err != nil { 983 return err 984 } 985 986 if err := qmpMonitor.Disconnect(); err != nil { 987 // FIXME: this error should probably be returned 988 return nil //nolint: nilerr 989 } 990 disconnected = true 991 992 if err := v.ReadySocket.Delete(); err != nil { 993 return err 994 } 995 996 if v.VMPidFilePath.GetPath() == "" { 997 // no vm pid file path means it's probably a machine created before we 998 // started using it, so we revert to the old way of waiting for the 999 // machine to stop 1000 return v.waitForMachineToStop() 1001 } 1002 1003 vmPid, err := v.VMPid() 1004 if err != nil { 1005 return err 1006 } 1007 1008 fmt.Println("Waiting for VM to exit...") 1009 for isProcessAlive(vmPid) { 1010 time.Sleep(500 * time.Millisecond) 1011 } 1012 1013 return nil 1014 } 1015 1016 // NewQMPMonitor creates the monitor subsection of our vm 1017 func NewQMPMonitor(network, name string, timeout time.Duration) (Monitor, error) { 1018 rtDir, err := getRuntimeDir() 1019 if err != nil { 1020 return Monitor{}, err 1021 } 1022 if isRootful() { 1023 rtDir = "/run" 1024 } 1025 rtDir = filepath.Join(rtDir, "podman") 1026 if _, err := os.Stat(rtDir); errors.Is(err, fs.ErrNotExist) { 1027 if err := os.MkdirAll(rtDir, 0755); err != nil { 1028 return Monitor{}, err 1029 } 1030 } 1031 if timeout == 0 { 1032 timeout = defaultQMPTimeout 1033 } 1034 address, err := define.NewMachineFile(filepath.Join(rtDir, "qmp_"+name+".sock"), nil) 1035 if err != nil { 1036 return Monitor{}, err 1037 } 1038 monitor := Monitor{ 1039 Network: network, 1040 Address: *address, 1041 Timeout: timeout, 1042 } 1043 return monitor, nil 1044 } 1045 1046 // collectFilesToDestroy retrieves the files that will be destroyed by `Remove` 1047 func (v *MachineVM) collectFilesToDestroy(opts machine.RemoveOptions) ([]string, error) { 1048 files := []string{} 1049 // Collect all the files that need to be destroyed 1050 if !opts.SaveKeys { 1051 files = append(files, v.IdentityPath, v.IdentityPath+".pub") 1052 } 1053 if !opts.SaveIgnition { 1054 files = append(files, v.getIgnitionFile()) 1055 } 1056 if !opts.SaveImage { 1057 files = append(files, v.getImageFile()) 1058 } 1059 socketPath, err := v.forwardSocketPath() 1060 if err != nil { 1061 return nil, err 1062 } 1063 if socketPath.Symlink != nil { 1064 files = append(files, *socketPath.Symlink) 1065 } 1066 files = append(files, socketPath.Path) 1067 files = append(files, v.archRemovalFiles()...) 1068 1069 vmConfigDir, err := machine.GetConfDir(vmtype) 1070 if err != nil { 1071 return nil, err 1072 } 1073 files = append(files, filepath.Join(vmConfigDir, v.Name+".json")) 1074 1075 return files, nil 1076 } 1077 1078 // removeQMPMonitorSocketAndVMPidFile removes the VM pidfile, proxy pidfile, 1079 // and QMP Monitor Socket 1080 func (v *MachineVM) removeQMPMonitorSocketAndVMPidFile() { 1081 // remove socket and pid file if any: warn at low priority if things fail 1082 // Remove the pidfile 1083 if err := v.VMPidFilePath.Delete(); err != nil { 1084 logrus.Debugf("Error while removing VM pidfile: %v", err) 1085 } 1086 if err := v.PidFilePath.Delete(); err != nil { 1087 logrus.Debugf("Error while removing proxy pidfile: %v", err) 1088 } 1089 // Remove socket 1090 if err := v.QMPMonitor.Address.Delete(); err != nil { 1091 logrus.Debugf("Error while removing podman-machine-socket: %v", err) 1092 } 1093 } 1094 1095 // Remove deletes all the files associated with a machine including ssh keys, the image itself 1096 func (v *MachineVM) Remove(_ string, opts machine.RemoveOptions) (string, func() error, error) { 1097 var ( 1098 files []string 1099 ) 1100 1101 v.lock.Lock() 1102 defer v.lock.Unlock() 1103 1104 // cannot remove a running vm unless --force is used 1105 state, err := v.State(false) 1106 if err != nil { 1107 return "", nil, err 1108 } 1109 if state == machine.Running { 1110 if !opts.Force { 1111 return "", nil, &machine.ErrVMRunningCannotDestroyed{Name: v.Name} 1112 } 1113 err := v.stopLocked() 1114 if err != nil { 1115 return "", nil, err 1116 } 1117 } 1118 1119 files, err = v.collectFilesToDestroy(opts) 1120 if err != nil { 1121 return "", nil, err 1122 } 1123 1124 confirmationMessage := "\nThe following files will be deleted:\n\n" 1125 for _, msg := range files { 1126 confirmationMessage += msg + "\n" 1127 } 1128 1129 v.removeQMPMonitorSocketAndVMPidFile() 1130 1131 confirmationMessage += "\n" 1132 return confirmationMessage, func() error { 1133 machine.RemoveFilesAndConnections(files, v.Name, v.Name+"-root") 1134 return nil 1135 }, nil 1136 } 1137 1138 func (v *MachineVM) State(bypass bool) (machine.Status, error) { 1139 // Check if qmp socket path exists 1140 if _, err := os.Stat(v.QMPMonitor.Address.GetPath()); errors.Is(err, fs.ErrNotExist) { 1141 return "", nil 1142 } 1143 err := v.update() 1144 if err != nil { 1145 return "", err 1146 } 1147 // Check if we can dial it 1148 if v.Starting && !bypass { 1149 return machine.Starting, nil 1150 } 1151 monitor, err := qmp.NewSocketMonitor(v.QMPMonitor.Network, v.QMPMonitor.Address.GetPath(), v.QMPMonitor.Timeout) 1152 if err != nil { 1153 // If an improper cleanup was done and the socketmonitor was not deleted, 1154 // it can appear as though the machine state is not stopped. Check for ECONNREFUSED 1155 // almost assures us that the vm is stopped. 1156 if errors.Is(err, syscall.ECONNREFUSED) { 1157 return machine.Stopped, nil 1158 } 1159 return "", err 1160 } 1161 if err := monitor.Connect(); err != nil { 1162 return "", err 1163 } 1164 defer func() { 1165 if err := monitor.Disconnect(); err != nil { 1166 logrus.Error(err) 1167 } 1168 }() 1169 // If there is a monitor, let's see if we can query state 1170 return v.checkStatus(monitor) 1171 } 1172 1173 func (v *MachineVM) isListening() bool { 1174 // Check if we can dial it 1175 conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", "127.0.0.1", v.Port), 10*time.Millisecond) 1176 if err != nil { 1177 return false 1178 } 1179 conn.Close() 1180 return true 1181 } 1182 1183 // SSH opens an interactive SSH session to the vm specified. 1184 // Added ssh function to VM interface: pkg/machine/config/go : line 58 1185 func (v *MachineVM) SSH(_ string, opts machine.SSHOptions) error { 1186 state, err := v.State(true) 1187 if err != nil { 1188 return err 1189 } 1190 if state != machine.Running { 1191 return fmt.Errorf("vm %q is not running", v.Name) 1192 } 1193 1194 username := opts.Username 1195 if username == "" { 1196 username = v.RemoteUsername 1197 } 1198 1199 return machine.CommonSSH(username, v.IdentityPath, v.Name, v.Port, opts.Args) 1200 } 1201 1202 // executes qemu-image info to get the virtual disk size 1203 // of the diskimage 1204 func getDiskSize(path string) (uint64, error) { 1205 // Find the qemu executable 1206 cfg, err := config.Default() 1207 if err != nil { 1208 return 0, err 1209 } 1210 qemuPathDir, err := cfg.FindHelperBinary("qemu-img", true) 1211 if err != nil { 1212 return 0, err 1213 } 1214 diskInfo := exec.Command(qemuPathDir, "info", "--output", "json", path) 1215 stdout, err := diskInfo.StdoutPipe() 1216 if err != nil { 1217 return 0, err 1218 } 1219 if err := diskInfo.Start(); err != nil { 1220 return 0, err 1221 } 1222 tmpInfo := struct { 1223 VirtualSize uint64 `json:"virtual-size"` 1224 Filename string `json:"filename"` 1225 ClusterSize int64 `json:"cluster-size"` 1226 Format string `json:"format"` 1227 FormatSpecific struct { 1228 Type string `json:"type"` 1229 Data map[string]string `json:"data"` 1230 } 1231 DirtyFlag bool `json:"dirty-flag"` 1232 }{} 1233 if err := json.NewDecoder(stdout).Decode(&tmpInfo); err != nil { 1234 return 0, err 1235 } 1236 if err := diskInfo.Wait(); err != nil { 1237 return 0, err 1238 } 1239 return tmpInfo.VirtualSize, nil 1240 } 1241 1242 // startHostNetworking runs a binary on the host system that allows users 1243 // to set up port forwarding to the podman virtual machine 1244 func (v *MachineVM) startHostNetworking() (string, machine.APIForwardingState, error) { 1245 cfg, err := config.Default() 1246 if err != nil { 1247 return "", machine.NoForwarding, err 1248 } 1249 binary, err := cfg.FindHelperBinary(machine.ForwarderBinaryName, false) 1250 if err != nil { 1251 return "", machine.NoForwarding, err 1252 } 1253 1254 cmd := gvproxy.NewGvproxyCommand() 1255 cmd.AddQemuSocket(fmt.Sprintf("unix://%s", v.QMPMonitor.Address.GetPath())) 1256 cmd.PidFile = v.PidFilePath.GetPath() 1257 cmd.SSHPort = v.Port 1258 1259 var forwardSock string 1260 var state machine.APIForwardingState 1261 if !v.isIncompatible() { 1262 cmd, forwardSock, state = v.setupAPIForwarding(cmd) 1263 } 1264 1265 if logrus.IsLevelEnabled(logrus.DebugLevel) { 1266 cmd.Debug = true 1267 logrus.Debug(cmd) 1268 } 1269 c := cmd.Cmd(binary) 1270 logrus.Debugf("gvproxy args: %v", c.Args) 1271 if err := c.Start(); err != nil { 1272 return "", 0, fmt.Errorf("unable to execute: %q: %w", cmd.ToCmdline(), err) 1273 } 1274 return forwardSock, state, nil 1275 } 1276 1277 func (v *MachineVM) setupAPIForwarding(cmd gvproxy.GvproxyCommand) (gvproxy.GvproxyCommand, string, machine.APIForwardingState) { 1278 socket, err := v.forwardSocketPath() 1279 1280 if err != nil { 1281 return cmd, "", machine.NoForwarding 1282 } 1283 1284 destSock := fmt.Sprintf("/run/user/%d/podman/podman.sock", v.UID) 1285 1286 forwardUser := v.RemoteUsername 1287 1288 if v.Rootful { 1289 destSock = "/run/podman/podman.sock" 1290 forwardUser = "root" 1291 } 1292 1293 cmd.AddForwardSock(socket.GetPath()) 1294 cmd.AddForwardDest(destSock) 1295 cmd.AddForwardUser(forwardUser) 1296 cmd.AddForwardIdentity(v.IdentityPath) 1297 1298 // The linking pattern is /var/run/docker.sock -> user global sock (link) -> machine sock (socket) 1299 // This allows the helper to only have to maintain one constant target to the user, which can be 1300 // repositioned without updating docker.sock. 1301 1302 link, err := v.userGlobalSocketLink() 1303 if err != nil { 1304 return cmd, socket.GetPath(), machine.MachineLocal 1305 } 1306 1307 if !dockerClaimSupported() { 1308 return cmd, socket.GetPath(), machine.ClaimUnsupported 1309 } 1310 1311 if !dockerClaimHelperInstalled() { 1312 return cmd, socket.GetPath(), machine.NotInstalled 1313 } 1314 1315 if !alreadyLinked(socket.GetPath(), link) { 1316 if checkSockInUse(link) { 1317 return cmd, socket.GetPath(), machine.MachineLocal 1318 } 1319 1320 _ = os.Remove(link) 1321 if err = os.Symlink(socket.GetPath(), link); err != nil { 1322 logrus.Warnf("could not create user global API forwarding link: %s", err.Error()) 1323 return cmd, socket.GetPath(), machine.MachineLocal 1324 } 1325 } 1326 1327 if !alreadyLinked(link, dockerSock) { 1328 if checkSockInUse(dockerSock) { 1329 return cmd, socket.GetPath(), machine.MachineLocal 1330 } 1331 1332 if !claimDockerSock() { 1333 logrus.Warn("podman helper is installed, but was not able to claim the global docker sock") 1334 return cmd, socket.GetPath(), machine.MachineLocal 1335 } 1336 } 1337 1338 return cmd, dockerSock, machine.DockerGlobal 1339 } 1340 1341 func (v *MachineVM) isIncompatible() bool { 1342 return v.UID == -1 1343 } 1344 1345 func (v *MachineVM) userGlobalSocketLink() (string, error) { 1346 path, err := machine.GetDataDir(machine.QemuVirt) 1347 if err != nil { 1348 logrus.Errorf("Resolving data dir: %s", err.Error()) 1349 return "", err 1350 } 1351 // User global socket is located in parent directory of machine dirs (one per user) 1352 return filepath.Join(filepath.Dir(path), "podman.sock"), err 1353 } 1354 1355 func (v *MachineVM) forwardSocketPath() (*define.VMFile, error) { 1356 sockName := "podman.sock" 1357 path, err := machine.GetDataDir(machine.QemuVirt) 1358 if err != nil { 1359 logrus.Errorf("Resolving data dir: %s", err.Error()) 1360 return nil, err 1361 } 1362 return define.NewMachineFile(filepath.Join(path, sockName), &sockName) 1363 } 1364 1365 func (v *MachineVM) setConfigPath() error { 1366 vmConfigDir, err := machine.GetConfDir(vmtype) 1367 if err != nil { 1368 return err 1369 } 1370 1371 configPath, err := define.NewMachineFile(filepath.Join(vmConfigDir, v.Name)+".json", nil) 1372 if err != nil { 1373 return err 1374 } 1375 v.ConfigPath = *configPath 1376 return nil 1377 } 1378 1379 func (v *MachineVM) setPIDSocket() error { 1380 rtPath, err := getRuntimeDir() 1381 if err != nil { 1382 return err 1383 } 1384 if isRootful() { 1385 rtPath = "/run" 1386 } 1387 socketDir := filepath.Join(rtPath, "podman") 1388 vmPidFileName := fmt.Sprintf("%s_vm.pid", v.Name) 1389 proxyPidFileName := fmt.Sprintf("%s_proxy.pid", v.Name) 1390 vmPidFilePath, err := define.NewMachineFile(filepath.Join(socketDir, vmPidFileName), &vmPidFileName) 1391 if err != nil { 1392 return err 1393 } 1394 proxyPidFilePath, err := define.NewMachineFile(filepath.Join(socketDir, proxyPidFileName), &proxyPidFileName) 1395 if err != nil { 1396 return err 1397 } 1398 v.VMPidFilePath = *vmPidFilePath 1399 v.PidFilePath = *proxyPidFilePath 1400 return nil 1401 } 1402 1403 // Deprecated: getSocketandPid is being replaced by setPIDSocket and 1404 // machinefiles. 1405 func (v *MachineVM) getSocketandPid() (string, string, error) { 1406 rtPath, err := getRuntimeDir() 1407 if err != nil { 1408 return "", "", err 1409 } 1410 if isRootful() { 1411 rtPath = "/run" 1412 } 1413 socketDir := filepath.Join(rtPath, "podman") 1414 pidFile := filepath.Join(socketDir, fmt.Sprintf("%s.pid", v.Name)) 1415 qemuSocket := filepath.Join(socketDir, fmt.Sprintf("qemu_%s.sock", v.Name)) 1416 return qemuSocket, pidFile, nil 1417 } 1418 1419 func checkSockInUse(sock string) bool { 1420 if info, err := os.Stat(sock); err == nil && info.Mode()&fs.ModeSocket == fs.ModeSocket { 1421 _, err = net.DialTimeout("unix", dockerSock, dockerConnectTimeout) 1422 return err == nil 1423 } 1424 1425 return false 1426 } 1427 1428 func alreadyLinked(target string, link string) bool { 1429 read, err := os.Readlink(link) 1430 return err == nil && read == target 1431 } 1432 1433 // update returns the content of the VM's 1434 // configuration file in json 1435 func (v *MachineVM) update() error { 1436 if err := v.setConfigPath(); err != nil { 1437 return err 1438 } 1439 b, err := v.ConfigPath.Read() 1440 if err != nil { 1441 if errors.Is(err, os.ErrNotExist) { 1442 return fmt.Errorf("%v: %w", v.Name, machine.ErrNoSuchVM) 1443 } 1444 return err 1445 } 1446 if err != nil { 1447 return err 1448 } 1449 err = json.Unmarshal(b, v) 1450 if err != nil { 1451 err = migrateVM(v.ConfigPath.GetPath(), b, v) 1452 if err != nil { 1453 return err 1454 } 1455 } 1456 return err 1457 } 1458 1459 func (v *MachineVM) writeConfig() error { 1460 // Set the path of the configfile before writing to make 1461 // life easier down the line 1462 if err := v.setConfigPath(); err != nil { 1463 return err 1464 } 1465 // Write the JSON file 1466 return machine.WriteConfig(v.ConfigPath.Path, v) 1467 } 1468 1469 // getImageFile wrapper returns the path to the image used 1470 // to boot the VM 1471 func (v *MachineVM) getImageFile() string { 1472 return v.ImagePath.GetPath() 1473 } 1474 1475 // getIgnitionFile wrapper returns the path to the ignition file 1476 func (v *MachineVM) getIgnitionFile() string { 1477 return v.IgnitionFile.GetPath() 1478 } 1479 1480 // Inspect returns verbose detail about the machine 1481 func (v *MachineVM) Inspect() (*machine.InspectInfo, error) { 1482 state, err := v.State(false) 1483 if err != nil { 1484 return nil, err 1485 } 1486 connInfo := new(machine.ConnectionConfig) 1487 podmanSocket, err := v.forwardSocketPath() 1488 if err != nil { 1489 return nil, err 1490 } 1491 connInfo.PodmanSocket = podmanSocket 1492 return &machine.InspectInfo{ 1493 ConfigPath: v.ConfigPath, 1494 ConnectionInfo: *connInfo, 1495 Created: v.Created, 1496 Image: v.ImageConfig, 1497 LastUp: v.LastUp, 1498 Name: v.Name, 1499 Resources: v.ResourceConfig, 1500 SSHConfig: v.SSHConfig, 1501 State: state, 1502 UserModeNetworking: true, // always true 1503 Rootful: v.Rootful, 1504 }, nil 1505 } 1506 1507 // resizeDisk increases the size of the machine's disk in GB. 1508 func (v *MachineVM) resizeDisk(diskSize uint64, oldSize uint64) error { 1509 // Resize the disk image to input disk size 1510 // only if the virtualdisk size is less than 1511 // the given disk size 1512 if diskSize < oldSize { 1513 return fmt.Errorf("new disk size must be larger than current disk size: %vGB", oldSize) 1514 } 1515 1516 // Find the qemu executable 1517 cfg, err := config.Default() 1518 if err != nil { 1519 return err 1520 } 1521 resizePath, err := cfg.FindHelperBinary("qemu-img", true) 1522 if err != nil { 1523 return err 1524 } 1525 resize := exec.Command(resizePath, []string{"resize", v.getImageFile(), strconv.Itoa(int(diskSize)) + "G"}...) 1526 resize.Stdout = os.Stdout 1527 resize.Stderr = os.Stderr 1528 if err := resize.Run(); err != nil { 1529 return fmt.Errorf("resizing image: %q", err) 1530 } 1531 1532 return nil 1533 } 1534 1535 func (v *MachineVM) setRootful(rootful bool) error { 1536 if err := machine.SetRootful(rootful, v.Name, v.Name+"-root"); err != nil { 1537 return err 1538 } 1539 1540 v.HostUser.Modified = true 1541 return nil 1542 } 1543 1544 func (v *MachineVM) editCmdLine(flag string, value string) { 1545 found := false 1546 for i, val := range v.CmdLine { 1547 if val == flag { 1548 found = true 1549 v.CmdLine[i+1] = value 1550 break 1551 } 1552 } 1553 if !found { 1554 v.CmdLine = append(v.CmdLine, []string{flag, value}...) 1555 } 1556 } 1557 1558 func isRootful() bool { 1559 // Rootless is not relevant on Windows. In the future rootless.IsRootless 1560 // could be switched to return true on Windows, and other codepaths migrated 1561 // for now will check additionally for valid os.Getuid 1562 1563 return !rootless.IsRootless() && os.Getuid() != -1 1564 } 1565 1566 func extractSourcePath(paths []string) string { 1567 return paths[0] 1568 } 1569 1570 func extractMountOptions(paths []string) (bool, string) { 1571 readonly := false 1572 securityModel := "none" 1573 if len(paths) > 2 { 1574 options := paths[2] 1575 volopts := strings.Split(options, ",") 1576 for _, o := range volopts { 1577 switch { 1578 case o == "rw": 1579 readonly = false 1580 case o == "ro": 1581 readonly = true 1582 case strings.HasPrefix(o, "security_model="): 1583 securityModel = strings.Split(o, "=")[1] 1584 default: 1585 fmt.Printf("Unknown option: %s\n", o) 1586 } 1587 } 1588 } 1589 return readonly, securityModel 1590 }