github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/container/kvm/wrappedcmds.go

// Copyright 2013-2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package kvm

// This file contains wrappers around the following executables:
//   genisoimage
//   qemu-img
//   virsh
// Those executables are found in the following packages:
//   genisoimage
//   libvirt-bin
//   qemu-utils
//
// These executables provide Juju's interface for dealing with kvm containers.
// They are the means by which we start, stop and list running containers on
// the host.

import (
	"encoding/xml"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/juju/errors"
	"github.com/juju/os/series"
	"github.com/juju/utils"
	"github.com/juju/utils/arch"
	"gopkg.in/yaml.v2"

	"github.com/juju/juju/container/kvm/libvirt"
	"github.com/juju/juju/juju/paths"
)

const (
	guestDir      = "guests"
	poolName      = "juju-pool"
	kvm           = "kvm"
	metadata      = "meta-data"
	userdata      = "user-data"
	networkconfig = "network-config"

	// This path is only valid on ubuntu, and xenial at this point.
	// TODO(ro) 2017-01-20 Determine if we will support trusty and update this
	// as necessary if so. It seems it will require some serious acrobatics to
	// get trusty to work properly and that may be out of scope for juju.
	nvramCode = "/usr/share/AAVMF/AAVMF_CODE.fd"
)

var (
	// machineListPattern is the regular expression for breaking up the
	// results of 'virsh list'. (?m) makes it a multi-line regex. The first
	// part is the opaque identifier we don't care about, then the hostname,
	// and lastly the status.
	machineListPattern = regexp.MustCompile(`(?m)^\s+\d+\s+(?P<hostname>[-\w]+)\s+(?P<status>.+)\s*$`)
)
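
// For reference, the -q flag suppresses virsh's header row, so the output
// this pattern is matched against resembles the following (hostnames are
// illustrative):
//
//	 3     juju-8c6cbc-kvm-0     running
//	 4     juju-8c6cbc-kvm-1     paused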

// CreateMachineParams implements libvirt.domainParams.
type CreateMachineParams struct {
	Hostname          string
	Series            string
	UserDataFile      string
	NetworkConfigData string
	NetworkBridge     string
	Memory            uint64
	CpuCores          uint64
	RootDisk          uint64
	Interfaces        []libvirt.InterfaceInfo

	disks    []libvirt.DiskInfo
	findPath func(string) (string, error)

	runCmd       runFunc
	runCmdAsRoot runFunc
	arch         string
}

// Arch returns the architecture to be used.
func (p CreateMachineParams) Arch() string {
	if p.arch != "" {
		return p.arch
	}
	return arch.HostArch()
}

// Loader returns the path to the binary firmware blob used in UEFI booting.
// At the time of this writing only ARM64 requires this to run.
func (p CreateMachineParams) Loader() string {
	return nvramCode
}

// Host implements libvirt.domainParams.
func (p CreateMachineParams) Host() string {
	return p.Hostname
}

// CPUs implements libvirt.domainParams.
func (p CreateMachineParams) CPUs() uint64 {
	if p.CpuCores == 0 {
		return 1
	}
	return p.CpuCores
}

// DiskInfo implements libvirt.domainParams.
func (p CreateMachineParams) DiskInfo() []libvirt.DiskInfo {
	return p.disks
}

// RAM implements libvirt.domainParams.
func (p CreateMachineParams) RAM() uint64 {
	if p.Memory == 0 {
		return 512
	}
	return p.Memory
}

// NetworkInfo implements libvirt.domainParams.
func (p CreateMachineParams) NetworkInfo() []libvirt.InterfaceInfo {
	return p.Interfaces
}

// ValidateDomainParams implements libvirt.domainParams.
func (p CreateMachineParams) ValidateDomainParams() error {
	if p.Hostname == "" {
		return errors.Errorf("missing required hostname")
	}
	if len(p.disks) < 2 {
		// We need at least the system drive and the data source disk.
		return errors.Errorf("got %d disks, need at least 2", len(p.disks))
	}
	// ds tracks whether a data source (raw) disk was seen, fs whether a
	// system (qcow2) disk was seen.
	var ds, fs bool
	for _, d := range p.disks {
		if d.Driver() == "qcow2" {
			fs = true
		}
		if d.Driver() == "raw" {
			ds = true
		}
	}
	if !ds {
		return errors.Trace(errors.Errorf("missing data source disk"))
	}
	if !fs {
		return errors.Trace(errors.Errorf("missing system disk"))
	}
	return nil
}

// diskInfo is a type for implementing libvirt.DiskInfo.
type diskInfo struct {
	driver, source string
}

// Driver implements libvirt.DiskInfo.
func (d diskInfo) Driver() string {
	return d.driver
}

// Source implements libvirt.DiskInfo.
func (d diskInfo) Source() string {
	return d.source
}

// CreateMachine creates a virtual machine and starts it.
func CreateMachine(params CreateMachineParams) error {
	if params.Hostname == "" {
		return fmt.Errorf("hostname is required")
	}

	setDefaults(&params)

	templateDir := filepath.Dir(params.UserDataFile)

	err := writeMetadata(templateDir)
	if err != nil {
		return errors.Annotate(err, "failed to write instance metadata")
	}

	dsPath, err := writeDatasourceVolume(params)
	if err != nil {
		return errors.Annotatef(err, "failed to write data source volume for %q", params.Host())
	}

	imgPath, err := writeRootDisk(params)
	if err != nil {
		return errors.Annotatef(err, "failed to write root volume for %q", params.Host())
	}

	params.disks = append(params.disks, diskInfo{source: imgPath, driver: "qcow2"})
	params.disks = append(params.disks, diskInfo{source: dsPath, driver: "raw"})

	domainPath, err := writeDomainXML(templateDir, params)
	if err != nil {
		return errors.Annotatef(err, "failed to write domain xml for %q", params.Host())
	}

	out, err := params.runCmdAsRoot("virsh", "define", domainPath)
	if err != nil {
		return errors.Annotatef(err, "failed to define the domain for %q from %s", params.Host(), domainPath)
	}
	logger.Debugf("created domain: %s", out)

	out, err = params.runCmdAsRoot("virsh", "start", params.Host())
	if err != nil {
		return errors.Annotatef(err, "failed to start domain %q", params.Host())
	}
	logger.Debugf("started domain: %s", out)

	return nil
}
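
// For orientation, a successful CreateMachine run shells out to roughly the
// following commands; angle-bracketed values are placeholders, not literal
// paths. The image tools run via runCmd (as the libvirt user), the virsh
// calls via runCmdAsRoot.
//
//	genisoimage -output <guests>/<hostname>-ds.iso -volid cidata -joliet -rock user-data meta-data network-config
//	qemu-img create -b <guests>/<backing file> -f qcow2 <guests>/<hostname>.qcow <root disk>G
//	virsh define <template dir>/<hostname>.xml
//	virsh start <hostname>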

// setDefaults fills in the default values for params.
func setDefaults(p *CreateMachineParams) {
	if p.findPath == nil {
		p.findPath = paths.DataDir
	}
	if p.runCmd == nil {
		p.runCmd = runAsLibvirt
	}
	if p.runCmdAsRoot == nil {
		p.runCmdAsRoot = run
	}
}

// DestroyMachine destroys the virtual machine represented by the kvmContainer.
func DestroyMachine(c *kvmContainer) error {
	if c.runCmd == nil {
		c.runCmd = run
	}
	if c.pathfinder == nil {
		c.pathfinder = paths.DataDir
	}

	// We don't return errors for virsh commands because it is possible that
	// we didn't succeed in creating the domain. Additionally, we want all the
	// commands to run. If any fail it is certainly because the thing we're
	// trying to remove wasn't created. However, we still want to try removing
	// all the parts. The exception here is getting the guestBase; if that
	// fails we return the error because we cannot continue without it.

	_, err := c.runCmd("virsh", "destroy", c.Name())
	if err != nil {
		logger.Infof("`virsh destroy %s` failed: %q", c.Name(), err)
	}

	// The nvram flag here removes the pflash drive for us. There is also a
	// `remove-all-storage` flag, but it is unclear whether that would also
	// remove the backing store, which we don't want to do. So we remove those
	// manually after undefining.
	_, err = c.runCmd("virsh", "undefine", "--nvram", c.Name())
	if err != nil {
		logger.Infof("`virsh undefine --nvram %s` failed: %q", c.Name(), err)
	}
	guestBase, err := guestPath(c.pathfinder)
	if err != nil {
		return errors.Trace(err)
	}
	err = os.Remove(filepath.Join(guestBase, fmt.Sprintf("%s.qcow", c.Name())))
	if err != nil {
		logger.Errorf("failed to remove system disk for %q: %s", c.Name(), err)
	}
	err = os.Remove(filepath.Join(guestBase, fmt.Sprintf("%s-ds.iso", c.Name())))
	if err != nil {
		logger.Errorf("failed to remove cloud-init data disk for %q: %s", c.Name(), err)
	}

	return nil
}

// AutostartMachine indicates that the virtual machine should automatically
// restart when the host restarts.
func AutostartMachine(c *kvmContainer) error {
	if c.runCmd == nil {
		c.runCmd = run
	}
	_, err := c.runCmd("virsh", "autostart", c.Name())
	return errors.Annotatef(err, "failed to autostart domain %q", c.Name())
}

// ListMachines returns a map of machine name to state, where state is one of:
// running, idle, paused, shutdown, shut off, crashed, dying, pmsuspended.
func ListMachines(runCmd runFunc) (map[string]string, error) {
	if runCmd == nil {
		runCmd = run
	}

	output, err := runCmd("virsh", "-q", "list", "--all")
	if err != nil {
		return nil, err
	}
	// Split the output into lines. Regex matching is the easiest way to
	// match them: id, hostname and status separated by whitespace, with
	// leading whitespace as well.
	result := make(map[string]string)
	for _, s := range machineListPattern.FindAllStringSubmatchIndex(output, -1) {
		hostnameAndStatus := machineListPattern.ExpandString(nil, "$hostname $status", output, s)
		parts := strings.SplitN(string(hostnameAndStatus), " ", 2)
		result[parts[0]] = parts[1]
	}
	return result, nil
}
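
// Continuing the illustrative `virsh -q list --all` output shown near
// machineListPattern above, ListMachines would return:
//
//	map[string]string{
//		"juju-8c6cbc-kvm-0": "running",
//		"juju-8c6cbc-kvm-1": "paused",
//	}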

// guestPath returns the path to the guest directory from the given
// pathfinder.
func guestPath(pathfinder func(string) (string, error)) (string, error) {
	baseDir, err := pathfinder(series.MustHostSeries())
	if err != nil {
		return "", errors.Trace(err)
	}
	return filepath.Join(baseDir, kvm, guestDir), nil
}

// writeDatasourceVolume creates a data source image for cloud-init.
func writeDatasourceVolume(params CreateMachineParams) (string, error) {
	templateDir := filepath.Dir(params.UserDataFile)

	if err := writeMetadata(templateDir); err != nil {
		return "", errors.Trace(err)
	}

	if err := writeNetworkConfig(params, templateDir); err != nil {
		return "", errors.Trace(err)
	}

	// Creating a working DS volume was a bit troublesome for me. I finally
	// found the details in the docs:
	// http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
	//
	// The arguments passed to create the DS volume for NoCloud must be
	// `user-data` and `meta-data`. So the `cloud-init` file we generate won't
	// work. Also, they must be exactly `user-data` and `meta-data` with no
	// path beforehand, so `$JUJUDIR/containers/juju-someid-0/user-data` also
	// fails.
	//
	// Furthermore, symlinks aren't followed by NoCloud. So we rename our
	// cloud-init file to user-data. We could change the output name in
	// juju/cloudconfig/containerinit/container_userdata.go:WriteUserData but
	// who knows what that will break.
	userDataPath := filepath.Join(templateDir, userdata)
	if err := os.Rename(params.UserDataFile, userDataPath); err != nil {
		return "", errors.Trace(err)
	}

	// We then change directories, capturing our original directory so we can
	// return to it. This allows us to run the command with user-data and
	// meta-data as relative paths to appease the NoCloud script.
	owd, err := os.Getwd()
	if err != nil {
		return "", errors.Trace(err)
	}
	if err = os.Chdir(templateDir); err != nil {
		return "", errors.Trace(err)
	}

	// Create the data source volume, outputting the iso image to the guests
	// (AKA libvirt storage pool) directory.
	guestBase, err := guestPath(params.findPath)
	if err != nil {
		return "", errors.Trace(err)
	}
	dsPath := filepath.Join(guestBase, fmt.Sprintf("%s-ds.iso", params.Host()))

	out, err := params.runCmd(
		"genisoimage",
		"-output", dsPath,
		"-volid", "cidata",
		"-joliet", "-rock",
		userdata,
		metadata,
		networkconfig)
	if err != nil {
		return "", errors.Trace(err)
	}
	logger.Debugf("created ds image: %s", out)

	// Return to the old working directory.
	if err := os.Chdir(owd); err != nil {
		return "", errors.Trace(err)
	}

	return dsPath, nil
}

// writeDomainXML writes out the configuration required to create a new guest
// domain.
func writeDomainXML(templateDir string, p CreateMachineParams) (string, error) {
	domainPath := filepath.Join(templateDir, fmt.Sprintf("%s.xml", p.Host()))
	dom, err := libvirt.NewDomain(p)
	if err != nil {
		return "", errors.Trace(err)
	}

	ml, err := xml.MarshalIndent(&dom, "", "    ")
	if err != nil {
		return "", errors.Trace(err)
	}

	f, err := os.Create(domainPath)
	if err != nil {
		return "", errors.Trace(err)
	}
	defer func() {
		if err := f.Close(); err != nil {
			logger.Debugf("failed to close %q: %s", f.Name(), err)
		}
	}()

	_, err = f.Write(ml)
	if err != nil {
		return "", errors.Trace(err)
	}

	return domainPath, nil
}
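
// The file written above is a regular libvirt domain definition. Abridged,
// and with the exact contents determined by libvirt.NewDomain, it looks
// something like:
//
//	<domain type="kvm">
//	    <name>juju-8c6cbc-kvm-0</name>
//	    ...
//	</domain>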

// writeMetadata writes out a metadata file with a UUID instance-id. The
// meta-data file is used in the data source image along with user-data (née
// cloud-init). `instance-id` is a required field in meta-data. It is what is
// used to determine if this is the first boot, and thereby whether or not to
// run cloud-init.
// See: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
func writeMetadata(dir string) error {
	data := fmt.Sprintf(`{"instance-id": "%s"}`, utils.MustNewUUID())
	f, err := os.Create(filepath.Join(dir, metadata))
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		if err = f.Close(); err != nil {
			logger.Errorf("failed to close %q: %s", f.Name(), err)
		}
	}()
	_, err = f.WriteString(data)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

func writeNetworkConfig(params CreateMachineParams, dir string) error {
	f, err := os.Create(filepath.Join(dir, networkconfig))
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		if err = f.Close(); err != nil {
			logger.Errorf("failed to close %q: %s", f.Name(), err)
		}
	}()
	_, err = f.WriteString(params.NetworkConfigData)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}

// writeRootDisk writes out the root disk for the container. This creates a
// system disk backed by our shared series/arch backing store.
func writeRootDisk(params CreateMachineParams) (string, error) {
	guestBase, err := guestPath(params.findPath)
	if err != nil {
		return "", errors.Trace(err)
	}
	imgPath := filepath.Join(guestBase, fmt.Sprintf("%s.qcow", params.Host()))
	backingPath := filepath.Join(
		guestBase,
		backingFileName(params.Series, params.Arch()))

	out, err := params.runCmd(
		"qemu-img",
		"create",
		"-b", backingPath,
		"-f", "qcow2",
		imgPath,
		fmt.Sprintf("%dG", params.RootDisk))
	logger.Debugf("created root image: %s", out)
	if err != nil {
		return "", errors.Trace(err)
	}

	return imgPath, nil
}

// poolInfo parses and returns the output of `virsh pool-info <poolname>`.
func poolInfo(runCmd runFunc) (*libvirtPool, error) {
	output, err := runCmd("virsh", "pool-info", poolName)
	if err != nil {
		logger.Debugf("pool %q doesn't appear to exist: %s", poolName, err)
		return nil, nil
	}

	p := &libvirtPool{}
	err = yaml.Unmarshal([]byte(output), p)
	if err != nil {
		logger.Errorf("failed to unmarshal pool info: %s", err)
		return nil, errors.Trace(err)
	}
	return p, nil
}

// libvirtPool represents the guest pool information we care about. Additional
// fields are available but ignored here.
type libvirtPool struct {
	Name      string `yaml:"Name"`
	State     string `yaml:"State"`
	Autostart string `yaml:"Autostart"`
}
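
// For reference, `virsh pool-info juju-pool` emits colon-separated key/value
// lines, which is why the plain YAML unmarshalling above works. The output
// resembles (values illustrative):
//
//	Name:           juju-pool
//	State:          running
//	Autostart:      yes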