github.com/openshift/moby-moby@v1.13.2-0.20170601211448-f5ec1e2936dc/daemon/volumes.go

package daemon

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"strings"

	"github.com/Sirupsen/logrus"
	dockererrors "github.com/docker/docker/api/errors"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/container"
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/drivers"
)

var (
	// ErrVolumeReadonly is used to signal an error when trying to copy data into
	// a volume mount that is not writable.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)

type mounts []container.Mount

// volumeToAPIType converts a volume.Volume to the type used by the Engine API
func volumeToAPIType(v volume.Volume) *types.Volume {
	tv := &types.Volume{
		Name:   v.Name(),
		Driver: v.DriverName(),
	}
	if v, ok := v.(volume.DetailedVolume); ok {
		tv.Labels = v.Labels()
		tv.Options = v.Options()
		tv.Scope = v.Scope()
	}

	return tv
}

// Len returns the number of mounts. Used in sorting.
func (m mounts) Len() int {
	return len(m)
}

// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m mounts) Less(i, j int) bool {
	return m.parts(i) < m.parts(j)
}

// Swap swaps two items in an array of mounts. Used in sorting.
func (m mounts) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m mounts) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}
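
// The following example is not part of the original file; it is a minimal,
// illustrative sketch of the sort order the mounts type implements: parts
// counts path separators after filepath.Clean, so shallower destinations such
// as "/var" sort before deeper ones such as "/var/lib/data". The function name
// and the destinations are hypothetical.
func exampleMountDepthOrdering() bool {
	ms := mounts{
		{Destination: "/var/lib/data"}, // 3 parts
		{Destination: "/var"},          // 1 part
	}
	// Less reports that "/var" is shallower than "/var/lib/data", so a sort
	// over ms would place "/var" first.
	return ms.Less(1, 0)
}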

// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Clean up old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}
	defer func() {
		// clean up the container mount points if we return with an error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Dereference(m.Volume, container.ID)
			}
		}
	}()

	dereferenceIfExists := func(destination string) {
		if v, ok := mountPoints[destination]; ok {
			logrus.Debugf("Duplicate mount point '%s'", destination)
			if v.Volume != nil {
				daemon.volumes.Dereference(v.Volume, container.ID)
			}
		}
	}

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := volume.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Type:        m.Type,
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && volume.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}
			dereferenceIfExists(cp.Destination)
			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		// #10618
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name is an already existing volume, we need to use that here
			bind.Driver = v.DriverName()
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		dereferenceIfExists(bind.Destination)
		mountPoints[bind.Destination] = bind
	}

	for _, cfg := range hostConfig.Mounts {
		mp, err := volume.ParseMountSpec(cfg)
		if err != nil {
			return dockererrors.NewBadRequestError(err)
		}

		if binds[mp.Destination] {
			return fmt.Errorf("Duplicate mount point '%s'", cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v volume.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
			} else {
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
			}
			if err != nil {
				return err
			}

			mp.Volume = v
			mp.Name = v.Name()
			mp.Driver = v.DriverName()

			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
			if cv, ok := v.(interface {
				CachedPath() string
			}); ok {
				mp.Source = cv.CachedPath()
			}
		}

		binds[mp.Destination] = true
		dereferenceIfExists(mp.Destination)
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Clean up old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if m.BackwardsCompatible() {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
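
// The sketch below is not part of the original file. It illustrates the raw
// bind-spec parsing used in step 3 above: volume.ParseMountRaw turns a
// "src:dst[:opts]" string into a *volume.MountPoint whose Destination keys the
// mountPoints map. The spec string and the function name are hypothetical.
func exampleParseBindSpec() (*volume.MountPoint, error) {
	// The empty string stands in for an unset hostConfig.VolumeDriver.
	bind, err := volume.ParseMountRaw("/srv/www:/var/www:ro", "")
	if err != nil {
		return nil, err
	}
	logrus.Debugf("bind %s -> %s (rw=%v)", bind.Source, bind.Destination, bind.RW)
	return bind, nil
}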

// lazyInitializeVolume initializes a mountpoint's volume if needed.
// This happens after a daemon restart.
func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
	if len(m.Driver) > 0 && m.Volume == nil {
		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
		if err != nil {
			return err
		}
		m.Volume = v
	}
	return nil
}

// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
// mount configurations.
// The container lock should not be held when calling this function.
// Changes are made in-memory only, and may also modify containers referenced
// by `container.HostConfig.VolumesFrom`.
func (daemon *Daemon) backportMountSpec(container *container.Container) {
	container.Lock()
	defer container.Unlock()

	maybeUpdate := make(map[string]bool)
	for _, mp := range container.MountPoints {
		if mp.Spec.Source != "" && mp.Type != "" {
			continue
		}
		maybeUpdate[mp.Destination] = true
	}
	if len(maybeUpdate) == 0 {
		return
	}

	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
	for _, m := range container.HostConfig.Mounts {
		mountSpecs[m.Target] = true
	}

	binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds))
	for _, rawSpec := range container.HostConfig.Binds {
		mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
		if err != nil {
			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
			continue
		}
		binds[mp.Destination] = mp
	}

	volumesFrom := make(map[string]volume.MountPoint)
	for _, fromSpec := range container.HostConfig.VolumesFrom {
		from, _, err := volume.ParseVolumesFrom(fromSpec)
		if err != nil {
			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
			continue
		}
		fromC, err := daemon.GetContainer(from)
		if err != nil {
			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
			continue
		}

		// make sure from container's specs have been backported
		daemon.backportMountSpec(fromC)

		fromC.Lock()
		for t, mp := range fromC.MountPoints {
			volumesFrom[t] = *mp
		}
		fromC.Unlock()
	}

	needsUpdate := func(containerMount, other *volume.MountPoint) bool {
		if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) {
			return true
		}
		return false
	}

	// main backport loop over the container's mount points
	for _, cm := range container.MountPoints {
		if !maybeUpdate[cm.Destination] {
			continue
		}
		// nothing to backport if from hostconfig.Mounts
		if mountSpecs[cm.Destination] {
			continue
		}

		if mp, exists := binds[cm.Destination]; exists {
			if needsUpdate(cm, mp) {
				cm.Spec = mp.Spec
				cm.Type = mp.Type
			}
			continue
		}

		if cm.Name != "" {
			if mp, exists := volumesFrom[cm.Destination]; exists {
				if needsUpdate(cm, &mp) {
					cm.Spec = mp.Spec
					cm.Type = mp.Type
				}
				continue
			}

			if cm.Type != "" {
				// probably specified via the hostconfig.Mounts
				continue
			}

			// anon volume
			cm.Type = mounttypes.TypeVolume
			cm.Spec.Type = mounttypes.TypeVolume
		} else {
			if cm.Type != "" {
				// already updated
				continue
			}

			cm.Type = mounttypes.TypeBind
			cm.Spec.Type = mounttypes.TypeBind
			cm.Spec.Source = cm.Source
			if cm.Propagation != "" {
				cm.Spec.BindOptions = &mounttypes.BindOptions{
					Propagation: cm.Propagation,
				}
			}
		}

		cm.Spec.Target = cm.Destination
		cm.Spec.ReadOnly = !cm.RW
	}
}
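
// The following is not part of the original file; it is a hedged sketch of the
// state backportMountSpec reconstructs for a legacy host-path bind mount. A
// mount point restored from a pre-1.13 daemon has no Spec.Source or Type, and
// the bind branch above fills them in from the old fields. The literal paths
// and the function name are hypothetical.
func exampleLegacyBindBackport() mounttypes.Mount {
	cm := &volume.MountPoint{
		Source:      "/srv/data",
		Destination: "/data",
		RW:          false,
	}
	// Mirror the bind-mount branch of backportMountSpec.
	cm.Type = mounttypes.TypeBind
	cm.Spec.Type = mounttypes.TypeBind
	cm.Spec.Source = cm.Source
	cm.Spec.Target = cm.Destination
	cm.Spec.ReadOnly = !cm.RW
	return cm.Spec
}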

func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error {
	localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName)
	if err != nil {
		return fmt.Errorf("can't retrieve local volume driver: %v", err)
	}
	vols, err := localVolumeDriver.List()
	if err != nil {
		return fmt.Errorf("can't retrieve local volumes: %v", err)
	}

	for _, v := range vols {
		name := v.Name()
		_, err := daemon.volumes.Get(name)
		if err != nil {
			logrus.Warnf("failed to retrieve volume %s from store: %v", name, err)
		}

		err = fn(v)
		if err != nil {
			return err
		}
	}

	return nil
}
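
// The helper below is not part of the original file; it is a minimal sketch of
// how a caller might use traverseLocalVolumes to inspect every volume known to
// the built-in "local" driver. logLocalVolumes is a hypothetical name.
func (daemon *Daemon) logLocalVolumes() error {
	return daemon.traverseLocalVolumes(func(v volume.Volume) error {
		// Name and Path are part of the volume.Volume interface used elsewhere
		// in this file (e.g. when resolving a volume bind's source path).
		logrus.Infof("local volume %s mounted from %s", v.Name(), v.Path())
		return nil
	})
}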