github.com/koding/terraform@v0.6.4-0.20170608090606-5d7e0339779d/builtin/providers/docker/resource_docker_container_funcs.go

package docker

import (
	"archive/tar"
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"time"

	dc "github.com/fsouza/go-dockerclient"
	"github.com/hashicorp/terraform/helper/schema"
)

// creationTime records when the most recent container was created, so that
// Read knows whether to wait for the container to reach a running state.
var (
	creationTime time.Time
)

func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error {
	var err error
	client := meta.(*dc.Client)

	var data Data
	if err := fetchLocalImages(&data, client); err != nil {
		return err
	}

	image := d.Get("image").(string)
	if _, ok := data.DockerImages[image]; !ok {
		if _, ok := data.DockerImages[image+":latest"]; !ok {
			return fmt.Errorf("Unable to find image %s", image)
		}
		image = image + ":latest"
	}

	// The Docker API now allows a HostConfig to be supplied in
	// CreateContainerOptions, but in testing it still only applies the
	// HostConfig options set in StartContainer.
	createOpts := dc.CreateContainerOptions{
		Name: d.Get("name").(string),
		Config: &dc.Config{
			Image:      image,
			Hostname:   d.Get("hostname").(string),
			Domainname: d.Get("domainname").(string),
		},
	}

	if v, ok := d.GetOk("env"); ok {
		createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("command"); ok {
		createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{}))
		for _, v := range createOpts.Config.Cmd {
			if v == "" {
				return fmt.Errorf("values for command may not be empty")
			}
		}
	}

	if v, ok := d.GetOk("entrypoint"); ok {
		createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{}))
	}

	if v, ok := d.GetOk("user"); ok {
		createOpts.Config.User = v.(string)
	}

	exposedPorts := map[dc.Port]struct{}{}
	portBindings := map[dc.Port][]dc.PortBinding{}

	if v, ok := d.GetOk("ports"); ok {
		exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set))
	}
	if len(exposedPorts) != 0 {
		createOpts.Config.ExposedPorts = exposedPorts
	}

	extraHosts := []string{}
	if v, ok := d.GetOk("host"); ok {
		extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set))
	}

	volumes := map[string]struct{}{}
	binds := []string{}
	volumesFrom := []string{}

	if v, ok := d.GetOk("volumes"); ok {
		volumes, binds, volumesFrom, err = volumeSetToDockerVolumes(v.(*schema.Set))
		if err != nil {
			return fmt.Errorf("Unable to parse volumes: %s", err)
		}
	}
	if len(volumes) != 0 {
		createOpts.Config.Volumes = volumes
	}

	if v, ok := d.GetOk("labels"); ok {
		createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
	}

	hostConfig := &dc.HostConfig{
		Privileged:      d.Get("privileged").(bool),
		PublishAllPorts: d.Get("publish_all_ports").(bool),
		RestartPolicy: dc.RestartPolicy{
			Name:              d.Get("restart").(string),
			MaximumRetryCount: d.Get("max_retry_count").(int),
		},
		LogConfig: dc.LogConfig{
			Type: d.Get("log_driver").(string),
		},
	}

	if len(portBindings) != 0 {
		hostConfig.PortBindings = portBindings
	}
	if len(extraHosts) != 0 {
		hostConfig.ExtraHosts = extraHosts
	}
	if len(binds) != 0 {
		hostConfig.Binds = binds
	}
	if len(volumesFrom) != 0 {
		hostConfig.VolumesFrom = volumesFrom
	}
	if v, ok := d.GetOk("capabilities"); ok {
		for _, capInt := range v.(*schema.Set).List() {
			capa := capInt.(map[string]interface{})
			hostConfig.CapAdd = stringSetToStringSlice(capa["add"].(*schema.Set))
			hostConfig.CapDrop = stringSetToStringSlice(capa["drop"].(*schema.Set))
			// Only a single capabilities block is supported.
			break
		}
	}

	if v, ok := d.GetOk("dns"); ok {
		hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("dns_opts"); ok {
		hostConfig.DNSOptions = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("dns_search"); ok {
		hostConfig.DNSSearch = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("links"); ok {
		hostConfig.Links = stringSetToStringSlice(v.(*schema.Set))
	}

	if v, ok := d.GetOk("memory"); ok {
		// "memory" is configured in MB; Docker expects bytes.
		hostConfig.Memory = int64(v.(int)) * 1024 * 1024
	}

	if v, ok := d.GetOk("memory_swap"); ok {
		// "memory_swap" is configured in MB; non-positive values (e.g. -1 for
		// unlimited swap) are passed through unchanged.
		swap := int64(v.(int))
		if swap > 0 {
			swap = swap * 1024 * 1024
		}
		hostConfig.MemorySwap = swap
	}

	if v, ok := d.GetOk("cpu_shares"); ok {
		hostConfig.CPUShares = int64(v.(int))
	}

	if v, ok := d.GetOk("log_opts"); ok {
		hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
	}

	if v, ok := d.GetOk("network_mode"); ok {
		hostConfig.NetworkMode = v.(string)
	}

	createOpts.HostConfig = hostConfig

	var retContainer *dc.Container
	if retContainer, err = client.CreateContainer(createOpts); err != nil {
		return fmt.Errorf("Unable to create container: %s", err)
	}
	if retContainer == nil {
		return fmt.Errorf("Returned container is nil")
	}

	d.SetId(retContainer.ID)

	if v, ok := d.GetOk("networks"); ok {
		var connectionOpts dc.NetworkConnectionOptions
		if v, ok := d.GetOk("network_alias"); ok {
			endpointConfig := &dc.EndpointConfig{}
			endpointConfig.Aliases = stringSetToStringSlice(v.(*schema.Set))
			connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID, EndpointConfig: endpointConfig}
		} else {
			connectionOpts = dc.NetworkConnectionOptions{Container: retContainer.ID}
		}

		for _, rawNetwork := range v.(*schema.Set).List() {
			network := rawNetwork.(string)
			if err := client.ConnectNetwork(network, connectionOpts); err != nil {
				return fmt.Errorf("Unable to connect to network '%s': %s", network, err)
			}
		}
	}

	if v, ok := d.GetOk("upload"); ok {
		for _, upload := range v.(*schema.Set).List() {
			content := upload.(map[string]interface{})["content"].(string)
			file := upload.(map[string]interface{})["file"].(string)

			// Wrap the file content in a single-entry tar archive, which is
			// the format UploadToContainer expects.
			buf := new(bytes.Buffer)
			tw := tar.NewWriter(buf)
			hdr := &tar.Header{
				Name: file,
				Mode: 0644,
				Size: int64(len(content)),
			}
			if err := tw.WriteHeader(hdr); err != nil {
				return fmt.Errorf("Error creating tar archive: %s", err)
			}
			if _, err := tw.Write([]byte(content)); err != nil {
				return fmt.Errorf("Error creating tar archive: %s", err)
			}
			if err := tw.Close(); err != nil {
				return fmt.Errorf("Error creating tar archive: %s", err)
			}

			uploadOpts := dc.UploadToContainerOptions{
				InputStream: bytes.NewReader(buf.Bytes()),
				Path:        "/",
			}

			if err := client.UploadToContainer(retContainer.ID, uploadOpts); err != nil {
				return fmt.Errorf("Unable to upload file content: %s", err)
			}
		}
	}

	creationTime = time.Now()
	if err := client.StartContainer(retContainer.ID, nil); err != nil {
		return fmt.Errorf("Unable to start container: %s", err)
	}

	return resourceDockerContainerRead(d, meta)
}

func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*dc.Client)

	apiContainer, err := fetchDockerContainer(d.Id(), client)
	if err != nil {
		return err
	}
	if apiContainer == nil {
		// This container doesn't exist anymore
		d.SetId("")
		return nil
	}

	var container *dc.Container

	loops := 1 // if it hasn't just been created, don't delay
	if !creationTime.IsZero() {
		loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
	}
	sleepTime := 500 * time.Millisecond

	for i := loops; i > 0; i-- {
		container, err = client.InspectContainer(apiContainer.ID)
		if err != nil {
			return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
		}

		if container.State.Running ||
			!container.State.Running && !d.Get("must_run").(bool) {
			break
		}

		if creationTime.IsZero() { // We didn't just create it, so don't wait around
			return resourceDockerContainerDelete(d, meta)
		}

		if container.State.FinishedAt.After(creationTime) {
			// It exited immediately, so error out so dependent containers
			// aren't started
			resourceDockerContainerDelete(d, meta)
			return fmt.Errorf("Container %s exited after creation, error was: %s", apiContainer.ID, container.State.Error)
		}

		time.Sleep(sleepTime)
	}

	// Handle the case of the for loop above running its course
	if !container.State.Running && d.Get("must_run").(bool) {
		resourceDockerContainerDelete(d, meta)
		return fmt.Errorf("Container %s failed to be in running state", apiContainer.ID)
	}

	// Read Network Settings
	if container.NetworkSettings != nil {
		d.Set("ip_address", container.NetworkSettings.IPAddress)
		d.Set("ip_prefix_length", container.NetworkSettings.IPPrefixLen)
		d.Set("gateway", container.NetworkSettings.Gateway)
		d.Set("bridge", container.NetworkSettings.Bridge)
	}

	return nil
}

func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) error {
	return nil
}

func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*dc.Client)

	// Stop the container before removing if destroy_grace_seconds is defined
	if d.Get("destroy_grace_seconds").(int) > 0 {
		var timeout = uint(d.Get("destroy_grace_seconds").(int))
		if err := client.StopContainer(d.Id(), timeout); err != nil {
			return fmt.Errorf("Error stopping container %s: %s", d.Id(), err)
		}
	}

	removeOpts := dc.RemoveContainerOptions{
		ID:            d.Id(),
		RemoveVolumes: true,
		Force:         true,
	}

	if err := client.RemoveContainer(removeOpts); err != nil {
		return fmt.Errorf("Error deleting container %s: %s", d.Id(), err)
	}

	d.SetId("")
	return nil
}

func stringListToStringSlice(stringList []interface{}) []string {
	ret := []string{}
	for _, v := range stringList {
		if v == nil {
			ret = append(ret, "")
			continue
		}
		ret = append(ret, v.(string))
	}
	return ret
}

func stringSetToStringSlice(stringSet *schema.Set) []string {
	ret := []string{}
	if stringSet == nil {
		return ret
	}
	for _, envVal := range stringSet.List() {
		ret = append(ret, envVal.(string))
	}
	return ret
}

func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
	mapped := make(map[string]string, len(typeMap))
	for k, v := range typeMap {
		mapped[k] = v.(string)
	}
	return mapped
}

func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
	apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})

	if err != nil {
		return nil, fmt.Errorf("Error fetching container information from Docker: %s", err)
	}

	for _, apiContainer := range apiContainers {
		if apiContainer.ID == ID {
			return &apiContainer, nil
		}
	}

	return nil, nil
}

func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) {
	retExposedPorts := map[dc.Port]struct{}{}
	retPortBindings := map[dc.Port][]dc.PortBinding{}

	for _, portInt := range ports.List() {
		port := portInt.(map[string]interface{})
		internal := port["internal"].(int)
		protocol := port["protocol"].(string)

		// Docker identifies ports as "<port>/<protocol>", e.g. "80/tcp".
		exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol)
		retExposedPorts[exposedPort] = struct{}{}

		external, extOk := port["external"].(int)
		ip, ipOk := port["ip"].(string)

		if extOk {
			portBinding := dc.PortBinding{
				HostPort: strconv.Itoa(external),
			}
			if ipOk {
				portBinding.HostIP = ip
			}
			retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding)
		}
	}

	return retExposedPorts, retPortBindings
}

func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
	retExtraHosts := []string{}

	for _, hostInt := range extraHosts.List() {
		host := hostInt.(map[string]interface{})
		ip := host["ip"].(string)
		hostname := host["host"].(string)
		// Docker expects extra hosts in "hostname:ip" form.
		retExtraHosts = append(retExtraHosts, hostname+":"+ip)
	}

	return retExtraHosts
}

func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
	retVolumeMap := map[string]struct{}{}
	retHostConfigBinds := []string{}
	retVolumeFromContainers := []string{}

	for _, volumeInt := range volumes.List() {
		volume := volumeInt.(map[string]interface{})
		fromContainer := volume["from_container"].(string)
		containerPath := volume["container_path"].(string)
		volumeName := volume["volume_name"].(string)
		if len(volumeName) == 0 {
			volumeName = volume["host_path"].(string)
		}
		readOnly := volume["read_only"].(bool)

		switch {
		case len(fromContainer) == 0 && len(containerPath) == 0:
			return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container")
		case len(fromContainer) != 0 && len(containerPath) != 0:
			return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry")
		case len(fromContainer) != 0:
			retVolumeFromContainers = append(retVolumeFromContainers, fromContainer)
		case len(volumeName) != 0:
			readWrite := "rw"
			if readOnly {
				readWrite = "ro"
			}
			retVolumeMap[containerPath] = struct{}{}
			retHostConfigBinds = append(retHostConfigBinds, volumeName+":"+containerPath+":"+readWrite)
		default:
			retVolumeMap[containerPath] = struct{}{}
		}
	}

	return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}