github.com/juju/juju@v0.0.0-20240430160146-1752b71fcf00/provider/lxd/environ_broker.go (about)

     1  // Copyright 2015 Canonical Ltd.
     2  // Licensed under the AGPLv3, see LICENCE file for details.
     3  
     4  package lxd
     5  
     6  import (
     7  	"fmt"
     8  	"strings"
     9  
    10  	"github.com/juju/collections/set"
    11  	"github.com/juju/errors"
    12  
    13  	"github.com/juju/juju/cloudconfig/cloudinit"
    14  	"github.com/juju/juju/cloudconfig/instancecfg"
    15  	"github.com/juju/juju/cloudconfig/providerinit"
    16  	"github.com/juju/juju/container/lxd"
    17  	"github.com/juju/juju/core/arch"
    18  	"github.com/juju/juju/core/instance"
    19  	"github.com/juju/juju/core/status"
    20  	"github.com/juju/juju/environs"
    21  	"github.com/juju/juju/environs/context"
    22  	"github.com/juju/juju/environs/instances"
    23  	"github.com/juju/juju/environs/simplestreams"
    24  	"github.com/juju/juju/environs/tags"
    25  	"github.com/juju/juju/provider/common"
    26  	"github.com/juju/juju/tools"
    27  )
    28  
    29  // StartInstance implements environs.InstanceBroker.
    30  func (env *environ) StartInstance(
    31  	ctx context.ProviderCallContext, args environs.StartInstanceParams,
    32  ) (*environs.StartInstanceResult, error) {
    33  	logger.Debugf("StartInstance: %q, %s", args.InstanceConfig.MachineId, args.InstanceConfig.Base)
    34  
    35  	arch, virtType, err := env.finishInstanceConfig(args)
    36  	if err != nil {
    37  		return nil, errors.Trace(err)
    38  	}
    39  
    40  	container, err := env.newContainer(ctx, args, arch, virtType)
    41  	if err != nil {
    42  		common.HandleCredentialError(IsAuthorisationFailure, err, ctx)
    43  		if args.StatusCallback != nil {
    44  			_ = args.StatusCallback(status.ProvisioningError, err.Error(), nil)
    45  		}
    46  		return nil, errors.Trace(err)
    47  	}
    48  	logger.Infof("started instance %q", container.Name)
    49  	inst := newInstance(container, env)
    50  
    51  	// Build the result.
    52  	hwc := env.getHardwareCharacteristics(args, inst)
    53  	result := environs.StartInstanceResult{
    54  		Instance: inst,
    55  		Hardware: hwc,
    56  	}
    57  	return &result, nil
    58  }
    59  
    60  func (env *environ) finishInstanceConfig(args environs.StartInstanceParams) (string, instance.VirtType, error) {
    61  	// Use the HostArch to determine the tools to use.
    62  	arch := env.server().HostArch()
    63  	tools, err := args.Tools.Match(tools.Filter{Arch: arch})
    64  	if err != nil {
    65  		return "", "", errors.Trace(err)
    66  	}
    67  	if err := args.InstanceConfig.SetTools(tools); err != nil {
    68  		return "", "", errors.Trace(err)
    69  	}
    70  
    71  	// Parse the virt-type from the constraints, so we can pass it to the
    72  	// findImage function.
    73  	virtType := instance.DefaultInstanceType
    74  	if args.Constraints.HasVirtType() {
    75  		if virtType, err = instance.ParseVirtType(*args.Constraints.VirtType); err != nil {
    76  			return "", "", errors.Trace(err)
    77  		}
    78  	}
    79  
    80  	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
    81  		return "", "", errors.Trace(err)
    82  	}
    83  	return arch, virtType, nil
    84  }
    85  
    86  // newContainer is where the new physical instance is actually
    87  // provisioned, relative to the provided args and spec. Info for that
    88  // low-level instance is returned.
    89  func (env *environ) newContainer(
    90  	ctx context.ProviderCallContext,
    91  	args environs.StartInstanceParams,
    92  	arch string,
    93  	virtType instance.VirtType,
    94  ) (*lxd.Container, error) {
    95  	// Note: other providers have the ImageMetadata already read for them
    96  	// and passed in as args.ImageMetadata. However, lxd provider doesn't
    97  	// use datatype: image-ids, it uses datatype: image-download, and we
    98  	// don't have a registered cloud/region.
    99  	imageSources, err := env.getImageSources()
   100  	if err != nil {
   101  		return nil, errors.Trace(err)
   102  	}
   103  
   104  	// Keep track of StatusCallback output so we may clean up later.
   105  	// This is implemented here, close to where the StatusCallback calls
   106  	// are made, instead of at a higher level in the package, so as not to
   107  	// assume that all providers will have the same need to be implemented
   108  	// in the same way.
   109  	statusCallback := func(currentStatus status.Status, msg string, data map[string]interface{}) error {
   110  		if args.StatusCallback != nil {
   111  			_ = args.StatusCallback(currentStatus, msg, nil)
   112  		}
   113  		return nil
   114  	}
   115  	cleanupCallback := func() {
   116  		if args.CleanupCallback != nil {
   117  			_ = args.CleanupCallback()
   118  		}
   119  	}
   120  	defer cleanupCallback()
   121  
   122  	target, err := env.getTargetServer(ctx, args)
   123  	if err != nil {
   124  		return nil, errors.Trace(err)
   125  	}
   126  
   127  	image, err := target.FindImage(ctx, args.InstanceConfig.Base, arch, virtType, imageSources, true, statusCallback)
   128  	if err != nil {
   129  		return nil, errors.Trace(err)
   130  	}
   131  	cleanupCallback() // Clean out any long line of completed download status
   132  
   133  	cSpec, err := env.getContainerSpec(image, target.ServerVersion(), args)
   134  	if err != nil {
   135  		return nil, errors.Trace(err)
   136  	}
   137  
   138  	_ = statusCallback(status.Allocating, "Creating container", nil)
   139  	container, err := target.CreateContainerFromSpec(cSpec)
   140  	if err != nil {
   141  		return nil, errors.Trace(err)
   142  	}
   143  	_ = statusCallback(status.Running, "Container started", nil)
   144  	return container, nil
   145  }
   146  
   147  func (env *environ) getImageSources() ([]lxd.ServerSpec, error) {
   148  	// TODO (stickupkid): Allow the passing in of the factory.
   149  	factory := simplestreams.DefaultDataSourceFactory()
   150  	metadataSources, err := environs.ImageMetadataSources(env, factory)
   151  	if err != nil {
   152  		return nil, errors.Trace(err)
   153  	}
   154  	remotes := make([]lxd.ServerSpec, 0)
   155  	for _, source := range metadataSources {
   156  		url, err := source.URL("")
   157  		if err != nil {
   158  			logger.Debugf("failed to get the URL for metadataSource: %s", err)
   159  			continue
   160  		}
   161  		// NOTE(jam) LXD only allows you to pass HTTPS URLs. So strip
   162  		// off http:// and replace it with https://
   163  		// Arguably we could give the user a direct error if
   164  		// env.ImageMetadataURL is http instead of https, but we also
   165  		// get http from the DefaultImageSources, which is why we
   166  		// replace it.
   167  		// TODO(jam) Maybe we could add a Validate step that ensures
   168  		// image-metadata-url is an "https://" URL, so that Users get a
   169  		// "your configuration is wrong" error, rather than silently
   170  		// changing it and having them get confused.
   171  		// https://github.com/canonical/lxd/issues/1763
   172  		remotes = append(remotes, lxd.MakeSimpleStreamsServerSpec(source.Description(), url))
   173  	}
   174  	// Required for CentOS images.
   175  	remotes = append(remotes, lxd.CloudImagesLinuxContainersRemote)
   176  	return remotes, nil
   177  }
   178  
   179  // getContainerSpec builds a container spec from the input container image and
   180  // start-up parameters.
   181  // Cloud-init config is generated based on the network devices in the default
   182  // profile and included in the spec config.
   183  func (env *environ) getContainerSpec(
   184  	image lxd.SourcedImage, serverVersion string, args environs.StartInstanceParams,
   185  ) (lxd.ContainerSpec, error) {
   186  	hostname, err := env.namespace.Hostname(args.InstanceConfig.MachineId)
   187  	if err != nil {
   188  		return lxd.ContainerSpec{}, errors.Trace(err)
   189  	}
   190  	cSpec := lxd.ContainerSpec{
   191  		Name:     hostname,
   192  		Profiles: append([]string{"default", env.profileName()}, args.CharmLXDProfiles...),
   193  		Image:    image,
   194  		Config:   make(map[string]string),
   195  	}
   196  	cSpec.ApplyConstraints(serverVersion, args.Constraints)
   197  
   198  	cloudCfg, err := cloudinit.New(args.InstanceConfig.Base.OS)
   199  	if err != nil {
   200  		return cSpec, errors.Trace(err)
   201  	}
   202  
   203  	// Assemble the list of NICs that need to be added to the container.
   204  	// This includes all NICs from the default profile as well as any
   205  	// additional NICs required to satisfy any subnets that were requested
   206  	// due to space constraints.
   207  	//
   208  	// If additional non-eth0 NICs are to be added, we need to ensure that
   209  	// cloud-init correctly configures them.
   210  	nics, err := env.assignContainerNICs(args)
   211  	if err != nil {
   212  		return cSpec, errors.Trace(err)
   213  	}
   214  
   215  	if !(len(nics) == 1 && nics["eth0"] != nil) {
   216  		logger.Debugf("generating custom cloud-init networking")
   217  
   218  		cSpec.Config[lxd.NetworkConfigKey] = cloudinit.CloudInitNetworkConfigDisabled
   219  
   220  		info, err := lxd.InterfaceInfoFromDevices(nics)
   221  		if err != nil {
   222  			return cSpec, errors.Trace(err)
   223  		}
   224  		if err := cloudCfg.AddNetworkConfig(info); err != nil {
   225  			return cSpec, errors.Trace(err)
   226  		}
   227  
   228  		cSpec.Devices = nics
   229  	}
   230  
   231  	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudCfg, lxdRenderer{})
   232  	if err != nil {
   233  		return cSpec, errors.Annotate(err, "composing user data")
   234  	}
   235  	logger.Debugf("LXD user data; %d bytes", len(userData))
   236  
   237  	// TODO(ericsnow) Looks like LXD does not handle gzipped userdata
   238  	// correctly.  It likely has to do with the HTTP transport, much
   239  	// as we have to b64encode the userdata for GCE.  Until that is
   240  	// resolved we simply pass the plain text.
   241  	//cfg[lxd.UserDataKey] = utils.Gzip(userData)
   242  	cSpec.Config[lxd.UserDataKey] = string(userData)
   243  
   244  	for k, v := range args.InstanceConfig.Tags {
   245  		if !strings.HasPrefix(k, tags.JujuTagPrefix) {
   246  			// Since some metadata is interpreted by LXD, we cannot allow
   247  			// arbitrary tags to be passed in by the user.
   248  			// We currently only pass through Juju-defined tags.
   249  			logger.Debugf("ignoring non-juju tag: %s=%s", k, v)
   250  			continue
   251  		}
   252  		cSpec.Config[lxd.UserNamespacePrefix+k] = v
   253  	}
   254  
   255  	return cSpec, nil
   256  }
   257  
// assignContainerNICs returns the full set of NIC devices (device name ->
// device config) to attach to a new container: the NICs inherited from the
// "default" LXD profile, plus one bridged NIC for each requested subnet
// (from instStartParams.SubnetsToZones) whose host bridge is not already
// served by an existing device. Additional devices are named sequentially
// as ethX, skipping names already in use.
func (env *environ) assignContainerNICs(instStartParams environs.StartInstanceParams) (map[string]map[string]string, error) {
	// First, include any nics explicitly requested by the default profile.
	assignedNICs, err := env.server().GetNICsFromProfile("default")
	if err != nil {
		return nil, errors.Trace(err)
	}

	// No additional NICs required.
	if len(instStartParams.SubnetsToZones) == 0 {
		return assignedNICs, nil
	}

	// The profile may have contributed no NICs at all; make sure we have a
	// writable map before adding our own entries below.
	if assignedNICs == nil {
		assignedNICs = make(map[string]map[string]string)
	}

	// We use two sets to de-dup the required NICs and ensure that each
	// additional NIC gets assigned a sequential ethX name.
	requestedHostBridges := set.NewStrings()
	requestedNICNames := set.NewStrings()
	for nicName, details := range assignedNICs {
		requestedNICNames.Add(nicName)
		if len(details) != 0 {
			requestedHostBridges.Add(details["parent"])
		}
	}

	// Assign any extra NICs required to satisfy the subnet requirements
	// for this instance.
	var nextIndex int
	for _, subnetList := range instStartParams.SubnetsToZones {
		for providerSubnetID := range subnetList {
			subnetID := string(providerSubnetID)

			// Sanity check: make sure we are using the correct subnet
			// naming conventions (subnet-$hostBridgeName-$CIDR).
			if !strings.HasPrefix(subnetID, "subnet-") {
				continue
			}

			// Let's be paranoid here and assume that the bridge
			// name may also contain dashes. So trim the "subnet-"
			// prefix and anything from the right-most dash to
			// recover the bridge name.
			subnetID = strings.TrimPrefix(subnetID, "subnet-")
			lastDashIndex := strings.LastIndexByte(subnetID, '-')
			if lastDashIndex == -1 {
				continue
			}
			hostBridge := subnetID[:lastDashIndex]

			// We have already requested a device on this subnet
			if requestedHostBridges.Contains(hostBridge) {
				continue
			}

			// Allocate a new device entry and ensure it doesn't
			// clash with any existing ones
			var devName string
			for {
				devName = fmt.Sprintf("eth%d", nextIndex)
				if requestedNICNames.Contains(devName) {
					nextIndex++
					continue
				}
				break
			}

			assignedNICs[devName] = map[string]string{
				"name":    devName,
				"type":    "nic",
				"nictype": "bridged",
				"parent":  hostBridge,
			}

			// Record the new device so later subnets on the same bridge,
			// or clashing names, are skipped.
			requestedHostBridges.Add(hostBridge)
			requestedNICNames.Add(devName)
		}
	}

	return assignedNICs, nil
}
   340  
   341  // getTargetServer checks to see if a valid zone was passed as a placement
   342  // directive in the start-up start-up arguments. If so, a server for the
   343  // specific node is returned.
   344  func (env *environ) getTargetServer(
   345  	ctx context.ProviderCallContext, args environs.StartInstanceParams,
   346  ) (Server, error) {
   347  	p, err := env.parsePlacement(ctx, args.Placement)
   348  	if err != nil {
   349  		return nil, errors.Trace(err)
   350  	}
   351  
   352  	if p.nodeName == "" {
   353  		return env.server(), nil
   354  	}
   355  	return env.server().UseTargetServer(p.nodeName)
   356  }
   357  
// lxdPlacement is the parsed result of a placement directive.
// An empty nodeName indicates that no specific cluster node was requested.
type lxdPlacement struct {
	// nodeName is the LXD cluster member to target.
	nodeName string
}
   361  
   362  func (env *environ) parsePlacement(ctx context.ProviderCallContext, placement string) (*lxdPlacement, error) {
   363  	if placement == "" {
   364  		return &lxdPlacement{}, nil
   365  	}
   366  
   367  	var node string
   368  	pos := strings.IndexRune(placement, '=')
   369  	// Assume that a plain string is a node name.
   370  	if pos == -1 {
   371  		node = placement
   372  	} else {
   373  		if placement[:pos] != "zone" {
   374  			return nil, fmt.Errorf("unknown placement directive: %v", placement)
   375  		}
   376  		node = placement[pos+1:]
   377  	}
   378  
   379  	if node == "" {
   380  		return &lxdPlacement{}, nil
   381  	}
   382  
   383  	zones, err := env.AvailabilityZones(ctx)
   384  	if err != nil {
   385  		return nil, errors.Trace(err)
   386  	}
   387  	if err := zones.Validate(node); err != nil {
   388  		return nil, errors.Trace(err)
   389  	}
   390  
   391  	return &lxdPlacement{nodeName: node}, nil
   392  }
   393  
   394  // getHardwareCharacteristics compiles hardware-related details about
   395  // the given instance and relative to the provided spec and returns it.
   396  func (env *environ) getHardwareCharacteristics(
   397  	args environs.StartInstanceParams, inst *environInstance,
   398  ) *instance.HardwareCharacteristics {
   399  	container := inst.container
   400  
   401  	archStr := container.Arch()
   402  	if archStr == "unknown" || !arch.IsSupportedArch(archStr) {
   403  		archStr = env.server().HostArch()
   404  	}
   405  	cores := uint64(container.CPUs())
   406  	mem := uint64(container.Mem())
   407  	return &instance.HardwareCharacteristics{
   408  		Arch:     &archStr,
   409  		CpuCores: &cores,
   410  		Mem:      &mem,
   411  		VirtType: &container.Type,
   412  	}
   413  }
   414  
   415  // AllInstances implements environs.InstanceBroker.
   416  func (env *environ) AllInstances(ctx context.ProviderCallContext) ([]instances.Instance, error) {
   417  	environInstances, err := env.allInstances()
   418  	instances := make([]instances.Instance, len(environInstances))
   419  	for i, inst := range environInstances {
   420  		if inst == nil {
   421  			continue
   422  		}
   423  		instances[i] = inst
   424  	}
   425  	return instances, errors.Trace(err)
   426  }
   427  
// AllRunningInstances implements environs.InstanceBroker.
func (env *environ) AllRunningInstances(ctx context.ProviderCallContext) ([]instances.Instance, error) {
	// We can only get Alive containers from lxd api which means that "all" is the same as "running".
	return env.AllInstances(ctx)
}
   433  
   434  // StopInstances implements environs.InstanceBroker.
   435  func (env *environ) StopInstances(ctx context.ProviderCallContext, instances ...instance.Id) error {
   436  	prefix := env.namespace.Prefix()
   437  	var names []string
   438  	for _, id := range instances {
   439  		name := string(id)
   440  		if strings.HasPrefix(name, prefix) {
   441  			names = append(names, name)
   442  		} else {
   443  			logger.Warningf("ignoring request to stop container %q - not in namespace %q", name, prefix)
   444  		}
   445  	}
   446  
   447  	err := env.server().RemoveContainers(names)
   448  	if err != nil {
   449  		common.HandleCredentialError(IsAuthorisationFailure, err, ctx)
   450  	}
   451  	return errors.Trace(err)
   452  }