package container

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	containerpkg "github.com/docker/docker/container"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/libnetwork"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/opencontainers/go-digest"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

// containerAdapter conducts remote operations for a container. All calls
// are mostly naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
	backend      executorpkg.Backend   // engine backend used for all container/network/volume operations
	container    *containerConfig      // container configuration derived from the swarmkit task
	dependencies exec.DependencyGetter // provider of the secrets/configs this container depends on
}

// newContainerAdapter returns an adapter for the given task, backed by b.
// It fails if the task spec cannot be converted into a containerConfig.
func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container:    ctnr,
		backend:      b,
		dependencies: dependencies,
	}, nil
}

// pullImage pulls the task's image through the backend, streaming the pull
// progress into rate-limited debug logs. Pulls are skipped entirely when the
// image is referenced by ID, or by digest and already present locally.
func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// Skip pulling if the image is referenced by image ID.
	if _, err := digest.Parse(spec.Image); err == nil {
		return nil
	}

	// Skip pulling if the image is referenced by digest and already
	// exists locally.
	named, err := reference.ParseNormalizedNamed(spec.Image)
	if err == nil {
		if _, ok := named.(reference.Canonical); ok {
			_, err := c.backend.LookupImage(spec.Image)
			if err == nil {
				return nil
			}
		}
	}

	// if the image needs to be pulled, the auth config will be retrieved and updated
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &types.AuthConfig{}
	if encodedAuthConfig != "" {
		// A malformed auth config is logged but not fatal: the pull is
		// still attempted (it may succeed against a public registry).
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	// The backend writes JSON progress messages to pw; we decode them from
	// pr below. CloseWithError propagates the pull result to the decoder.
	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	// At most one progress log per second, unless the status string changes.
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			// io.EOF carries the (possibly nil) error from PullImage via
			// CloseWithError; a nil pull error surfaces as plain EOF here.
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}

// createNetworks creates every network the task references. A network that
// already exists (NetworkNameError) is silently skipped.
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for _, network := range c.container.networks() {
		ncr, err := c.container.networkCreateRequest(network)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}

			return err
		}
	}

	return nil
}

// removeNetworks removes the task's networks. Networks that still have
// active endpoints or that no longer exist are skipped; any other failure
// aborts the removal.
func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	for _, nid := range c.container.networks() {
		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
			switch err.(type) {
			case *libnetwork.ActiveEndpointsError:
				continue
			case libnetwork.ErrNoSuchNetwork:
				continue
			default:
				log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
				return err
			}
		}
	}

	return nil
}

// networkAttach registers this container's attachment with the first (and,
// per the map iteration with an immediate break, only considered) endpoint
// in the networking config.
func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config)
}

// waitForDetach blocks until the backend reports that this container has
// detached from its (first) network, or ctx is cancelled.
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig(c.backend)

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id())
}

// create creates the managed container, connects it to any additional
// networks, and wires up its dependency store, secret/config references,
// and service config — in that order, before the container is started.
func (c *containerAdapter) create(ctx context.Context) error {
	var cr containertypes.ContainerCreateCreatedBody
	var err error
	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(c.backend),
	}); err != nil {
		return err
	}

	// Docker daemon currently doesn't support multiple networks in container create
	// Connect to all other networks
	nc := c.container.connectNetworkingConfig(c.backend)

	if nc != nil {
		for n, ep := range nc.EndpointsConfig {
			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
				return err
			}
		}
	}

	container := c.container.task.Spec.GetContainer()
	if container == nil {
		return errors.New("unable to get container from task spec")
	}

	if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil {
		return err
	}

	// configure secrets
	secretRefs := convert.SecretReferencesFromGRPC(container.Secrets)
	if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil {
		return err
	}

	configRefs := convert.ConfigReferencesFromGRPC(container.Configs)
	if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil {
		return err
	}

	if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
		return err
	}

	return nil
}

// checkMounts ensures that the provided mounts won't have any host-specific
// problems at start up. For example, we disallow bind mounts without an
// existing path, which is slightly different from the container API.
271 func (c *containerAdapter) checkMounts() error { 272 spec := c.container.spec() 273 for _, mount := range spec.Mounts { 274 switch mount.Type { 275 case api.MountTypeBind: 276 if _, err := os.Stat(mount.Source); os.IsNotExist(err) { 277 return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) 278 } 279 } 280 } 281 282 return nil 283 } 284 285 func (c *containerAdapter) start(ctx context.Context) error { 286 if err := c.checkMounts(); err != nil { 287 return err 288 } 289 290 return c.backend.ContainerStart(c.container.name(), nil, "", "") 291 } 292 293 func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { 294 cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) 295 if ctx.Err() != nil { 296 return types.ContainerJSON{}, ctx.Err() 297 } 298 if err != nil { 299 return types.ContainerJSON{}, err 300 } 301 return *cs, nil 302 } 303 304 // events issues a call to the events API and returns a channel with all 305 // events. The stream of events can be shutdown by cancelling the context. 
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	// Size the channel to the backlog so the replay loop below cannot block.
	eventsq := make(chan events.Message, len(buffer))

	// Replay buffered (historical) events before streaming live ones.
	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				// The inner select keeps the forward non-blocking with
				// respect to cancellation: if nobody is reading eventsq,
				// ctx.Done() still lets the goroutine exit.
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}

// wait returns a channel that resolves when the container stops running.
func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) {
	return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning)
}

// shutdown stops the container, honoring the task's StopGracePeriod when set.
func (c *containerAdapter) shutdown(ctx context.Context) error {
	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
	var stopgrace *int
	spec := c.container.spec()
	if spec.StopGracePeriod != nil {
		stopgraceValue := int(spec.StopGracePeriod.Seconds)
		stopgrace = &stopgraceValue
	}
	return c.backend.ContainerStop(c.container.name(), stopgrace)
}

// terminate forcibly kills the container with SIGKILL.
func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}

// remove force-removes the container together with its volumes.
func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

// createVolumes creates the driver-backed volumes referenced by the task's
// volume mounts. Mounts without volume options or a driver config are
// assumed to need no explicit creation and are skipped.
func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}

	}

	return nil
}

// activateServiceBinding enables this container's load-balancer binding.
func (c *containerAdapter) activateServiceBinding() error {
	return c.backend.ActivateContainerServiceBinding(c.container.name())
}

// deactivateServiceBinding disables this container's load-balancer binding.
func (c *containerAdapter) deactivateServiceBinding() error {
	return c.backend.DeactivateContainerServiceBinding(c.container.name())
}

// logs translates swarmkit log subscription options into engine-API log
// options and returns the backend's log message channel.
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) {
	apiOptions := &types.ContainerLogsOptions{
		Follow: options.Follow,

		// Always say yes to Timestamps and Details. we make the decision
		// of whether to return these to the user or not way higher up the
		// stack.
		Timestamps: true,
		Details:    true,
	}

	if options.Since != nil {
		since, err := gogotypes.TimestampFromProto(options.Since)
		if err != nil {
			return nil, err
		}
		// print since as this formatted string because the docker container
		// logs interface expects it like this.
		// see github.com/docker/docker/api/types/time.ParseTimestamps
		apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond()))
	}

	// Tail < 0 means "last N lines" encoded as -(N+1); Tail > 0 would mean
	// "relative to the start of the logs", which the engine API cannot do.
	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, errors.New("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}
	msgs, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions)
	if err != nil {
		return nil, err
	}
	return msgs, nil
}

// todo: typed/wrapped errors

// isContainerCreateNameConflict reports whether err is the daemon's
// name-conflict error from container create, detected by substring match.
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

// isUnknownContainer reports whether err indicates the container does not
// exist, detected by substring match.
func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

// isStoppedContainer reports whether err indicates the container is already
// stopped, detected by substring match.
func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}