// github.com/rhatdan/docker@v0.7.7-0.20180119204836-47a0dcbcd20a/daemon/cluster/executor/container/adapter.go

package container

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	containerpkg "github.com/docker/docker/container"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/libnetwork"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

// containerAdapter conducts remote operations for a container. Most calls
// are naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
	backend      executorpkg.Backend
	container    *containerConfig
	dependencies exec.DependencyGetter
}

func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task, node)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container:    ctnr,
		backend:      b,
		dependencies: dependencies,
	}, nil
}

func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// Skip pulling if the image is referenced by image ID.
	if _, err := digest.Parse(spec.Image); err == nil {
		return nil
	}

	// Skip pulling if the image is referenced by digest and already
	// exists locally.
	named, err := reference.ParseNormalizedNamed(spec.Image)
	if err == nil {
		if _, ok := named.(reference.Canonical); ok {
			_, err := c.backend.LookupImage(spec.Image)
			if err == nil {
				return nil
			}
		}
	}

	// if the image needs to be pulled, the auth config will be retrieved and updated
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &types.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		// TODO @jhowardmsft LCOW Support: This will need revisiting as
		// the stack is built up to include LCOW support for swarm.
		platform := runtime.GOOS
		err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}
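// encodeAuthConfig is an illustrative helper, not part of the original file:
// it sketches how a client could produce the RegistryAuth string that
// pullImage decodes above, i.e. URL-safe base64 over a JSON-serialized
// types.AuthConfig.
func encodeAuthConfig(auth *types.AuthConfig) (string, error) {
	// Marshal the auth config to JSON, then wrap it in the same URL-safe
	// base64 alphabet the decoder above expects.
	buf, err := json.Marshal(auth)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}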
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for name := range c.container.networksAttachments {
		ncr, err := c.container.networkCreateRequest(name)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}

			return err
		}
	}

	return nil
}

func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	for name, v := range c.container.networksAttachments {
		if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil {
			switch err.(type) {
			case *libnetwork.ActiveEndpointsError:
				continue
			case libnetwork.ErrNoSuchNetwork:
				continue
			default:
				log.G(ctx).Errorf("network %s remove failed: %v", name, err)
				return err
			}
		}
	}

	return nil
}

func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
}

func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
}
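// Note (illustrative, not in the original file): networkAttach and
// waitForDetach above only consider the first endpoint in the networking
// config; they back swarm "attachment" tasks, where a standalone container is
// attached to a swarm-scoped network rather than run as a service replica.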
func (c *containerAdapter) create(ctx context.Context) error {
	var cr containertypes.ContainerCreateCreatedBody
	var err error
	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
	}); err != nil {
		return err
	}

	// Docker daemon currently doesn't support multiple networks in container create
	// Connect to all other networks
	nc := c.container.connectNetworkingConfig(c.backend)

	if nc != nil {
		for n, ep := range nc.EndpointsConfig {
			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
				return err
			}
		}
	}

	container := c.container.task.Spec.GetContainer()
	if container == nil {
		return errors.New("unable to get container from task spec")
	}

	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
		return err
	}

	// configure secrets
	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
		return err
	}

	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
		return err
	}

	return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig())
}

// checkMounts ensures that the provided mounts won't have any host-specific
// problems at start up. For example, we disallow bind mounts without an
// existing path, which is slightly different from the container API.
func (c *containerAdapter) checkMounts() error {
	spec := c.container.spec()
	for _, mount := range spec.Mounts {
		switch mount.Type {
		case api.MountTypeBind:
			if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
				return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
			}
		}
	}

	return nil
}

func (c *containerAdapter) start(ctx context.Context) error {
	if err := c.checkMounts(); err != nil {
		return err
	}

	return c.backend.ContainerStart(c.container.name(), nil, "", "")
}

func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
	if ctx.Err() != nil {
		return types.ContainerJSON{}, ctx.Err()
	}
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return *cs, nil
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}
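// Illustrative usage note, not part of the original file: the channel
// returned by events is never closed, so a consumer should not expect a range
// over it to terminate; it must watch the same context it passed in, e.g.
//
//	evq := c.events(ctx)
//	for {
//		select {
//		case ev := <-evq:
//			_ = ev // handle the event
//		case <-ctx.Done():
//			return
//		}
//	}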
func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
}

func (c *containerAdapter) shutdown(ctx context.Context) error {
	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
	var stopgrace *int
	spec := c.container.spec()
	if spec.StopGracePeriod != nil {
		stopgraceValue := int(spec.StopGracePeriod.Seconds)
		stopgrace = &stopgraceValue
	}
	return c.backend.ContainerStop(c.container.name(), stopgrace)
}

func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}

func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}

	}

	return nil
}

func (c *containerAdapter) activateServiceBinding() error {
	return c.backend.ActivateContainerServiceBinding(c.container.name())
}

func (c *containerAdapter) deactivateServiceBinding() error {
	return c.backend.DeactivateContainerServiceBinding(c.container.name())
}
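// tailString is an illustrative helper, not part of the original file: it
// restates the Tail encoding that logs (below) relies on. A negative
// LogSubscriptionOptions.Tail of -(n+1) selects the last n lines, zero leaves
// the engine default of all lines, and positive values (tail relative to the
// start of the logs) cannot be expressed through the docker API.
func tailString(tail int64) (string, error) {
	switch {
	case tail < 0:
		// e.g. a Tail of -11 becomes "10": the last ten lines.
		return fmt.Sprint(-tail - 1), nil
	case tail > 0:
		return "", errors.New("tail relative to start of logs not supported via docker API")
	default:
		// The engine treats "all" (or an empty Tail) as the full log.
		return "all", nil
	}
}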
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
	apiOptions := &types.ContainerLogsOptions{
		Follow: options.Follow,

		// Always say yes to Timestamps and Details. We make the decision
		// of whether to return these to the user or not way higher up the
		// stack.
		Timestamps: true,
		Details:    true,
	}

	if options.Since != nil {
		since, err := gogotypes.TimestampFromProto(options.Since)
		if err != nil {
			return nil, err
		}
		// print since as this formatted string because the docker container
		// logs interface expects it like this.
		// see github.com/docker/docker/api/types/time.ParseTimestamps
		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, errors.New("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}
	msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
	if err != nil {
		return nil, err
	}
	return msgs, nil
}

// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}
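// runTaskSketch is an illustrative sketch, not part of the original file. It
// shows one plausible order in which a caller (in docker, the task controller
// in this package) could drive a containerAdapter through a task lifecycle;
// the real controller interleaves these steps with status reporting and
// finer-grained error handling.
func runTaskSketch(ctx context.Context, c *containerAdapter) error {
	// Prepare: networks, volumes, and the image must exist before create.
	if err := c.createNetworks(ctx); err != nil {
		return err
	}
	if err := c.createVolumes(ctx); err != nil {
		return err
	}
	if err := c.pullImage(ctx); err != nil {
		return err
	}
	if err := c.create(ctx); err != nil {
		return err
	}

	// Start the container and wait for it to stop running.
	if err := c.start(ctx); err != nil {
		return err
	}
	statusC, err := c.wait(ctx)
	if err != nil {
		return err
	}

	select {
	case status := <-statusC:
		if status.Err() != nil {
			return status.Err()
		}
		if code := status.ExitCode(); code != 0 {
			return fmt.Errorf("container exited with code %d", code)
		}
		return nil
	case <-ctx.Done():
		// Best-effort shutdown on cancellation; the caller decides whether
		// to also remove the container.
		_ = c.shutdown(context.Background())
		return ctx.Err()
	}
}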