github.com/vieux/docker@v0.6.3-0.20161004191708-e097c2a938c7/daemon/cluster/executor/container/adapter.go

package container

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/versions"
	executorpkg "github.com/docker/docker/daemon/cluster/executor"
	"github.com/docker/libnetwork"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/log"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

// containerAdapter conducts remote operations for a container. All calls
// are mostly naked calls to the client API, seeded with information from
// containerConfig.
type containerAdapter struct {
	backend   executorpkg.Backend
	container *containerConfig
}

func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapter, error) {
	ctnr, err := newContainerConfig(task)
	if err != nil {
		return nil, err
	}

	return &containerAdapter{
		container: ctnr,
		backend:   b,
	}, nil
}

func (c *containerAdapter) pullImage(ctx context.Context) error {
	spec := c.container.spec()

	// if the image needs to be pulled, the auth config will be retrieved and updated
	var encodedAuthConfig string
	if spec.PullOptions != nil {
		encodedAuthConfig = spec.PullOptions.RegistryAuth
	}

	authConfig := &types.AuthConfig{}
	if encodedAuthConfig != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	pr, pw := io.Pipe()
	metaHeaders := map[string][]string{}
	go func() {
		err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
		pw.CloseWithError(err)
	}()

	dec := json.NewDecoder(pr)
	dec.UseNumber()
	m := map[string]interface{}{}
	spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1)

	lastStatus := ""
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		l := log.G(ctx)
		// limit pull progress logs unless the status changes
		if spamLimiter.Allow() || lastStatus != m["status"] {
			// if we have progress details, we have everything we need
			if progress, ok := m["progressDetail"].(map[string]interface{}); ok {
				// first, log the image and status
				l = l.WithFields(logrus.Fields{
					"image":  c.container.image(),
					"status": m["status"],
				})
				// then, if we have progress, log the progress
				if progress["current"] != nil && progress["total"] != nil {
					l = l.WithFields(logrus.Fields{
						"current": progress["current"],
						"total":   progress["total"],
					})
				}
			}
			l.Debug("pull in progress")
		}
		// sometimes, we get no useful information at all, and add no fields
		if status, ok := m["status"].(string); ok {
			lastStatus = status
		}
	}

	// if the final stream object contained an error, return it
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}
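// The RegistryAuth value decoded in pullImage above is the base64 URL-encoded
// JSON of a types.AuthConfig, mirroring the X-Registry-Auth header produced by
// the client. The helper below is an illustrative sketch of that encoding, not
// part of the original file; encodeAuthConfigExample is a hypothetical name.
func encodeAuthConfigExample(authConfig *types.AuthConfig) (string, error) {
	// Marshal the auth config to JSON, then base64 URL-encode it, the inverse
	// of the decoding pipeline used in pullImage.
	buf, err := json.Marshal(authConfig)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}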
func (c *containerAdapter) createNetworks(ctx context.Context) error {
	for _, network := range c.container.networks() {
		ncr, err := c.container.networkCreateRequest(network)
		if err != nil {
			return err
		}

		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing
			if _, ok := err.(libnetwork.NetworkNameError); ok {
				continue
			}

			return err
		}
	}

	return nil
}

func (c *containerAdapter) removeNetworks(ctx context.Context) error {
	for _, nid := range c.container.networks() {
		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
			if _, ok := err.(*libnetwork.ActiveEndpointsError); ok {
				continue
			}
			log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
			return err
		}
	}

	return nil
}

func (c *containerAdapter) networkAttach(ctx context.Context) error {
	config := c.container.createNetworkingConfig()

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config)
}

func (c *containerAdapter) waitForDetach(ctx context.Context) error {
	config := c.container.createNetworkingConfig()

	var (
		networkName string
		networkID   string
	)

	if config != nil {
		for n, epConfig := range config.EndpointsConfig {
			networkName = n
			networkID = epConfig.NetworkID
			break
		}
	}

	return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id())
}

func (c *containerAdapter) create(ctx context.Context) error {
	var cr types.ContainerCreateResponse
	var err error
	version := httputils.VersionFromContext(ctx)
	validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")

	if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{
		Name:       c.container.name(),
		Config:     c.container.config(),
		HostConfig: c.container.hostConfig(),
		// Use the first network in container create
		NetworkingConfig: c.container.createNetworkingConfig(),
	}, validateHostname); err != nil {
		return err
	}

	// Docker daemon currently doesn't support multiple networks in container create
	// Connect to all other networks
	nc := c.container.connectNetworkingConfig()

	if nc != nil {
		for n, ep := range nc.EndpointsConfig {
			if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
				return err
			}
		}
	}

	if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
		return err
	}

	return nil
}

func (c *containerAdapter) start(ctx context.Context) error {
	version := httputils.VersionFromContext(ctx)
	validateHostname := versions.GreaterThanOrEqualTo(version, "1.24")
	return c.backend.ContainerStart(c.container.name(), nil, validateHostname, "")
}

func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
	if ctx.Err() != nil {
		return types.ContainerJSON{}, ctx.Err()
	}
	if err != nil {
		return types.ContainerJSON{}, err
	}
	return *cs, nil
}

// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}
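// consumeEventsExample is an illustrative sketch, not part of the original
// file, of how the channel returned by events above is typically consumed by a
// caller such as the task controller: read messages until the context is
// cancelled, which also stops the forwarding goroutine inside events.
func consumeEventsExample(ctx context.Context, c *containerAdapter) {
	eventq := c.events(ctx)
	for {
		select {
		case ev := <-eventq:
			// React to the event; here we only log the action for illustration.
			log.G(ctx).Debugf("container event: %v", ev.Action)
		case <-ctx.Done():
			return
		}
	}
}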
func (c *containerAdapter) wait(ctx context.Context) error {
	return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID())
}

func (c *containerAdapter) shutdown(ctx context.Context) error {
	// Default stop grace period to 10s.
	stopgrace := 10
	spec := c.container.spec()
	if spec.StopGracePeriod != nil {
		stopgrace = int(spec.StopGracePeriod.Seconds)
	}
	return c.backend.ContainerStop(c.container.name(), stopgrace)
}

func (c *containerAdapter) terminate(ctx context.Context) error {
	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
}

func (c *containerAdapter) remove(ctx context.Context) error {
	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
		RemoveVolume: true,
		ForceRemove:  true,
	})
}

func (c *containerAdapter) createVolumes(ctx context.Context) error {
	// Create plugin volumes that are embedded inside a Mount
	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
		if mount.Type != api.MountTypeVolume {
			continue
		}

		if mount.VolumeOptions == nil {
			continue
		}

		if mount.VolumeOptions.DriverConfig == nil {
			continue
		}

		req := c.container.volumeCreateRequest(&mount)

		// Check if this volume exists on the engine
		if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
			// TODO(amitshukla): Today, volume create through the engine api does not return an error
			// when the named volume with the same parameters already exists.
			// It returns an error if the driver name is different - that is a valid error
			return err
		}

	}

	return nil
}

// todo: typed/wrapped errors
func isContainerCreateNameConflict(err error) bool {
	return strings.Contains(err.Error(), "Conflict. The name")
}

func isUnknownContainer(err error) bool {
	return strings.Contains(err.Error(), "No such container:")
}

func isStoppedContainer(err error) bool {
	return strings.Contains(err.Error(), "is already stopped")
}
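// runTaskExample is an illustrative sketch, not part of the original file,
// showing one plausible order in which a caller (in the real code, the task
// controller) drives the adapter through a task's lifecycle. Errors simply
// abort the sequence here; the actual controller logic is more involved.
func runTaskExample(ctx context.Context, c *containerAdapter) error {
	// Prepare networks and volumes before the container itself.
	if err := c.createNetworks(ctx); err != nil {
		return err
	}
	if err := c.createVolumes(ctx); err != nil {
		return err
	}
	if err := c.pullImage(ctx); err != nil {
		return err
	}
	if err := c.create(ctx); err != nil {
		return err
	}
	if err := c.start(ctx); err != nil {
		return err
	}
	// Block until the container exits, then clean it up.
	if err := c.wait(ctx); err != nil {
		return err
	}
	return c.remove(ctx)
}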