github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/container/lxd/container.go

// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package lxd

import (
	"fmt"
	"math"
	"strings"

	"github.com/juju/errors"
	"github.com/juju/juju/core/constraints"
	"github.com/juju/juju/network"
	"github.com/juju/utils/arch"
	"github.com/lxc/lxd/shared"
	"github.com/lxc/lxd/shared/api"
)

const (
	UserNamespacePrefix = "user."
	UserDataKey         = UserNamespacePrefix + "user-data"
	NetworkConfigKey    = UserNamespacePrefix + "network-config"
	JujuModelKey        = UserNamespacePrefix + "juju-model"
	AutoStartKey        = "boot.autostart"
)

// ContainerSpec represents the data required to create a new container.
type ContainerSpec struct {
	Name         string
	Image        SourcedImage
	Devices      map[string]device
	Config       map[string]string
	Profiles     []string
	InstanceType string
}

// ApplyConstraints applies the input constraints as valid LXD container
// configuration to the container spec.
// Note that we pass these through as supplied. If an instance type
// constraint is specified along with specific cores/mem constraints,
// LXD's behaviour is to let the specific values override those implied
// by the instance type, even when they are lower.
func (c *ContainerSpec) ApplyConstraints(cons constraints.Value) {
	if cons.HasInstanceType() {
		c.InstanceType = *cons.InstanceType
	}
	if cons.HasCpuCores() {
		c.Config["limits.cpu"] = fmt.Sprintf("%d", *cons.CpuCores)
	}
	if cons.HasMem() {
		c.Config["limits.memory"] = fmt.Sprintf("%dMiB", *cons.Mem)
	}
}

// Container extends the upstream LXD container type.
type Container struct {
	api.Container
}

// Metadata returns the value from container config for the input key.
// Such values are stored with the "user" namespace prefix.
func (c *Container) Metadata(key string) string {
	return c.Config[UserNamespacePrefix+key]
}

// Arch returns the architecture of the container.
func (c *Container) Arch() string {
	return arch.NormaliseArch(c.Architecture)
}

// CPUs returns the configured limit for number of container CPU cores.
// If unset, zero is returned.
func (c *Container) CPUs() uint {
	var cores uint
	if v := c.Config["limits.cpu"]; v != "" {
		_, err := fmt.Sscanf(v, "%d", &cores)
		if err != nil {
			logger.Errorf("failed to parse %q into uint, ignoring err: %s", v, err)
		}
	}
	return cores
}

// Mem returns the configured limit for container memory in MiB.
// If unset or unparseable, zero is returned.
func (c *Container) Mem() uint {
	v := c.Config["limits.memory"]
	if v == "" {
		return 0
	}

	bytes, err := shared.ParseByteSizeString(v)
	if err != nil {
		logger.Errorf("failed to parse %q into bytes, ignoring err: %s", v, err)
		return 0
	}

	const oneMiB = 1024 * 1024
	mib := bytes / oneMiB
	if mib > math.MaxUint32 {
		logger.Errorf("byte string %q overflowed uint32, using max value", v)
		return math.MaxUint32
	}

	return uint(mib)
}
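// The sketch below is illustrative only and is not part of the upstream
// file; it shows how ApplyConstraints maps Juju constraints onto the LXD
// limit keys used by CPUs and Mem above. The container name and profile
// list are hypothetical values.
func exampleApplyConstraints() ContainerSpec {
	spec := ContainerSpec{
		Name:     "juju-example-0",
		Profiles: []string{"default"},
		Config:   map[string]string{},
	}
	// "cores=2" and "mem=2048M" parse to CpuCores=2 and Mem=2048 (MiB),
	// so the config gains "limits.cpu"="2" and "limits.memory"="2048MiB".
	spec.ApplyConstraints(constraints.MustParse("cores=2", "mem=2048M"))
	return spec
}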
// AddDisk updates the container's devices map to represent a disk device
// described by the input arguments.
// If the device already exists, an error is returned.
func (c *Container) AddDisk(name, path, source, pool string, readOnly bool) error {
	if _, ok := c.Devices[name]; ok {
		return errors.Errorf("container %q already has a device %q", c.Name, name)
	}

	if c.Devices == nil {
		c.Devices = map[string]device{}
	}
	c.Devices[name] = map[string]string{
		"path":   path,
		"source": source,
	}
	if pool != "" {
		c.Devices[name]["pool"] = pool
	}
	if readOnly {
		c.Devices[name]["readonly"] = "true"
	}
	return nil
}

// aliveStatuses is the list of status strings that indicate
// a container is "alive".
var aliveStatuses = []string{
	api.Starting.String(),
	api.Started.String(),
	api.Running.String(),
	api.Stopping.String(),
	api.Stopped.String(),
}

// AliveContainers returns the containers whose names match the input
// namespace prefix and whose status indicates that they are "alive".
func (s *Server) AliveContainers(prefix string) ([]Container, error) {
	c, err := s.FilterContainers(prefix, aliveStatuses...)
	return c, errors.Trace(err)
}

// FilterContainers retrieves the list of containers from the server and
// filters them based on the input namespace prefix and any supplied statuses.
func (s *Server) FilterContainers(prefix string, statuses ...string) ([]Container, error) {
	containers, err := s.GetContainers()
	if err != nil {
		return nil, errors.Trace(err)
	}

	var results []Container
	for _, c := range containers {
		if prefix != "" && !strings.HasPrefix(c.Name, prefix) {
			continue
		}
		if len(statuses) > 0 && !containerHasStatus(c, statuses) {
			continue
		}
		results = append(results, Container{c})
	}
	return results, nil
}

// ContainerAddresses gets usable network addresses for the container
// identified by the input name.
func (s *Server) ContainerAddresses(name string) ([]network.Address, error) {
	state, _, err := s.GetContainerState(name)
	if err != nil {
		return nil, errors.Trace(err)
	}

	networks := state.Network
	if networks == nil {
		return []network.Address{}, nil
	}

	var results []network.Address
	for netName, net := range networks {
		if netName == network.DefaultLXCBridge || netName == network.DefaultLXDBridge {
			continue
		}
		for _, addr := range net.Addresses {
			netAddr := network.NewAddress(addr.Address)
			if netAddr.Scope == network.ScopeLinkLocal || netAddr.Scope == network.ScopeMachineLocal {
				logger.Tracef("ignoring address %q for container %q", addr.Address, name)
				continue
			}
			results = append(results, netAddr)
		}
	}
	return results, nil
}
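// Illustrative sketch, not part of the upstream file: attaching a read-only
// disk device to a container representation via AddDisk. The device name,
// paths and storage pool are hypothetical values.
func exampleAddDisk(c *Container) error {
	// On success this adds the device entry:
	//   "charm-dir": {"path": "/var/lib/juju", "source": "juju-vol",
	//                 "pool": "default", "readonly": "true"}
	return c.AddDisk("charm-dir", "/var/lib/juju", "juju-vol", "default", true)
}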
// CreateContainerFromSpec creates a new container based on the input spec,
// and starts it immediately.
// If the container fails to start, it is removed.
// Upon successful creation and start, the container is returned.
func (s *Server) CreateContainerFromSpec(spec ContainerSpec) (*Container, error) {
	logger.Infof("starting new container %q (image %q)", spec.Name, spec.Image.Image.Filename)
	logger.Debugf("new container has profiles %v", spec.Profiles)
	req := api.ContainersPost{
		Name:         spec.Name,
		InstanceType: spec.InstanceType,
		ContainerPut: api.ContainerPut{
			Profiles:  spec.Profiles,
			Devices:   spec.Devices,
			Config:    spec.Config,
			Ephemeral: false,
		},
	}
	op, err := s.CreateContainerFromImage(spec.Image.LXDServer, *spec.Image.Image, req)
	if err != nil {
		return nil, errors.Trace(err)
	}

	if err := op.Wait(); err != nil {
		return nil, errors.Trace(err)
	}
	opInfo, err := op.GetTarget()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if opInfo.StatusCode != api.Success {
		return nil, fmt.Errorf("container creation failed: %s", opInfo.Err)
	}

	logger.Debugf("created container %q, waiting for start...", spec.Name)

	if err := s.StartContainer(spec.Name); err != nil {
		if remErr := s.RemoveContainer(spec.Name); remErr != nil {
			logger.Errorf("failed to remove container after unsuccessful start: %s", remErr.Error())
		}
		return nil, errors.Trace(err)
	}

	container, _, err := s.GetContainer(spec.Name)
	if err != nil {
		return nil, errors.Trace(err)
	}
	c := Container{*container}
	return &c, nil
}

// StartContainer starts the extant container identified by the input name.
func (s *Server) StartContainer(name string) error {
	req := api.ContainerStatePut{
		Action:   "start",
		Timeout:  -1,
		Force:    false,
		Stateful: false,
	}
	op, err := s.UpdateContainerState(name, req, "")
	if err != nil {
		return errors.Trace(err)
	}

	return errors.Trace(op.Wait())
}

// RemoveContainers stops and deletes containers matching the input list of
// names. Any failed removals are indicated in the returned error.
func (s *Server) RemoveContainers(names []string) error {
	if len(names) == 0 {
		return nil
	}

	var failed []string
	for _, name := range names {
		if err := s.RemoveContainer(name); err != nil {
			failed = append(failed, name)
			logger.Errorf("removing container %q: %v", name, err)
		}
	}
	if len(failed) != 0 {
		return errors.Errorf("failed to remove containers: %s", strings.Join(failed, ", "))
	}
	return nil
}

// RemoveContainer first ensures that the container is stopped,
// then deletes it.
func (s *Server) RemoveContainer(name string) error {
	state, eTag, err := s.GetContainerState(name)
	if err != nil {
		return errors.Trace(err)
	}

	if state.StatusCode != api.Stopped {
		req := api.ContainerStatePut{
			Action:   "stop",
			Timeout:  -1,
			Force:    true,
			Stateful: false,
		}
		op, err := s.UpdateContainerState(name, req, eTag)
		if err != nil {
			return errors.Trace(err)
		}
		if err := op.Wait(); err != nil {
			return errors.Trace(err)
		}
	}

	op, err := s.DeleteContainer(name)
	if err != nil {
		return errors.Trace(err)
	}

	return errors.Trace(op.Wait())
}
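// Illustrative sketch, not part of the upstream file: the typical
// create-then-remove flow built from the methods above. The spec is assumed
// to have been populated (including image resolution) elsewhere.
func exampleLifecycle(s *Server, spec ContainerSpec) error {
	container, err := s.CreateContainerFromSpec(spec)
	if err != nil {
		// CreateContainerFromSpec already removes the container when the
		// start step fails, so no extra clean-up is required here.
		return errors.Trace(err)
	}
	// ... use the running container, then stop and delete it.
	return errors.Trace(s.RemoveContainers([]string{container.Name}))
}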
// WriteContainer writes the current representation of the input container to
// the server.
func (s *Server) WriteContainer(c *Container) error {
	resp, err := s.UpdateContainer(c.Name, c.Writable(), "")
	if err != nil {
		return errors.Trace(err)
	}
	if err := resp.Wait(); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// containerHasStatus returns true if the input container has a status
// matching one from the input list.
func containerHasStatus(container api.Container, statuses []string) bool {
	for _, status := range statuses {
		if container.StatusCode.String() == status {
			return true
		}
	}
	return false
}
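// Illustrative sketch, not part of the upstream file: fetching a container,
// mutating its device map, and persisting the change with WriteContainer.
// The container and device names are hypothetical values.
func exampleWriteDisk(s *Server, name string) error {
	container, _, err := s.GetContainer(name)
	if err != nil {
		return errors.Trace(err)
	}
	c := Container{*container}
	// An empty pool is omitted and readOnly=false leaves "readonly" unset.
	if err := c.AddDisk("logs", "/var/log/juju", "juju-logs", "", false); err != nil {
		return errors.Trace(err)
	}
	return errors.Trace(s.WriteContainer(&c))
}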