github.com/flavio/docker@v0.1.3-0.20170117145210-f63d1a6eec47/daemon/cluster/executor/container/adapter.go

package container

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/daemon/cluster/convert"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/docker/reference"
	"github.com/docker/libnetwork"
	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	"github.com/docker/swarmkit/protobuf/ptypes"
	"github.com/opencontainers/go-digest"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

// containerAdapter conducts remote operations for a container. All calls
// are mostly naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
	backend   executorpkg.Backend
	container *containerConfig
	secrets   exec.SecretGetter
}

func newContainerAdapter(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container: ctnr,
		backend:   b,
		secrets:   secrets,
	}, nil
}

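// pullImage pulls the task's image through the backend. The pull is skipped
// when the image is referenced by ID, or by digest and already present
// locally. Pull progress is decoded from the backend's JSON stream and logged
// at debug level, rate-limited to about one entry per second unless the
// status changes.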
func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// Skip pulling if the image is referenced by image ID.
	if _, err := digest.Parse(spec.Image); err == nil {
		return nil
	}

	// Skip pulling if the image is referenced by digest and already
	// exists locally.
	named, err := reference.ParseNamed(spec.Image)
	if err == nil {
		if _, ok := named.(reference.Canonical); ok {
			_, err := c.backend.LookupImage(spec.Image)
			if err == nil {
				return nil
			}
		}
	}

	// if the image needs to be pulled, the auth config will be retrieved and updated
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &types.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}

func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for _, network := range c.container.networks() {
		ncr, err := c.container.networkCreateRequest(network)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}

			return err
		}
	}

	return nil
}

func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	for _, nid := range c.container.networks() {
		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
			switch err.(type) {
			case *libnetwork.ActiveEndpointsError:
				continue
			case libnetwork.ErrNoSuchNetwork:
				continue
			default:
				log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
				return err
			}
		}
	}

	return nil
}

func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig()

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config)
}

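// waitForDetach blocks until the backend reports that the task's network
// attachment (the first endpoint in the networking config, if any) has been
// detached, or the context is cancelled.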
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig()

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id())
}

func (c *containerAdapter) create(ctx context.Context) error {
	var cr containertypes.ContainerCreateCreatedBody
	var err error

	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(),
	}); err != nil {
		return err
	}

	// Docker daemon currently doesn't support multiple networks in container create
	// Connect to all other networks
	nc := c.container.connectNetworkingConfig()

	if nc != nil {
		for n, ep := range nc.EndpointsConfig {
			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
				return err
			}
		}
	}

	container := c.container.task.Spec.GetContainer()
	if container == nil {
		return errors.New("unable to get container from task spec")
	}

	// configure secrets
	if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil {
		return err
	}

	refs := convert.SecretReferencesFromGRPC(container.Secrets)
	if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil {
		return err
	}

	if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
		return err
	}

	return nil
}

func (c *containerAdapter) start(ctx context.Context) error {
	return c.backend.ContainerStart(c.container.name(), nil, "", "")
}

func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
	if ctx.Err() != nil {
		return types.ContainerJSON{}, ctx.Err()
	}
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return *cs, nil
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}

func (c *containerAdapter) wait(ctx context.Context) error {
	return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID())
}

func (c *containerAdapter) shutdown(ctx context.Context) error {
	// Default stop grace period to nil (daemon will use the stopTimeout of the container)
	var stopgrace *int
	spec := c.container.spec()
	if spec.StopGracePeriod != nil {
		stopgraceValue := int(spec.StopGracePeriod.Seconds)
		stopgrace = &stopgraceValue
	}
	return c.backend.ContainerStop(c.container.name(), stopgrace)
}

func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}

func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}

	}

	return nil
}

func (c *containerAdapter) activateServiceBinding() error {
	return c.backend.ActivateContainerServiceBinding(c.container.name())
}

func (c *containerAdapter) deactivateServiceBinding() error {
	return c.backend.DeactivateContainerServiceBinding(c.container.name())
}

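// logs starts a log stream for the container through the backend and returns
// a reader over the raw stream. The selected streams (stdout/stderr), the
// starting timestamp, and the tail behaviour are derived from the given
// LogSubscriptionOptions.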
func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) {
	reader, writer := io.Pipe()

	apiOptions := &backend.ContainerLogsConfig{
		ContainerLogsOptions: types.ContainerLogsOptions{
			Follow: options.Follow,

			// TODO(stevvooe): Parse timestamp out of message. This
			// absolutely needs to be done before going to production with
			// this, as it is completely redundant.
			Timestamps: true,
			Details:    false, // no clue what to do with this, let's just deprecate it.
		},
		OutStream: writer,
	}

	if options.Since != nil {
		since, err := ptypes.Timestamp(options.Since)
		if err != nil {
			return nil, err
		}
		apiOptions.Since = since.Format(time.RFC3339Nano)
	}

	if options.Tail < 0 {
		// See protobuf documentation for details of how this works.
		apiOptions.Tail = fmt.Sprint(-options.Tail - 1)
	} else if options.Tail > 0 {
		return nil, errors.New("tail relative to start of logs not supported via docker API")
	}

	if len(options.Streams) == 0 {
		// empty == all
		apiOptions.ShowStdout, apiOptions.ShowStderr = true, true
	} else {
		for _, stream := range options.Streams {
			switch stream {
			case api.LogStreamStdout:
				apiOptions.ShowStdout = true
			case api.LogStreamStderr:
				apiOptions.ShowStderr = true
			}
		}
	}

	chStarted := make(chan struct{})
	go func() {
		defer writer.Close()
		c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted)
	}()

	return reader, nil
}

// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}
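
// Note: the adapters above are intended to be driven in order (pull image,
// create networks/volumes, create, start, wait, shutdown/terminate, remove)
// by the task controller in this package; this file only implements the
// individual operations against the backend.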