github.com/mforkel/docker-ce-i386@v17.12.1-ce-rc2+incompatible/components/engine/daemon/volumes.go (about)

package daemon

import (
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/container"
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/drivers"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// ErrVolumeReadonly is used to signal an error when trying to copy data into
	// a volume mount that is not writable.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)

type mounts []container.Mount

// volumeToAPIType converts a volume.Volume to the type used by the Engine API
func volumeToAPIType(v volume.Volume) *types.Volume {
	createdAt, _ := v.CreatedAt()
	tv := &types.Volume{
		Name:      v.Name(),
		Driver:    v.DriverName(),
		CreatedAt: createdAt.Format(time.RFC3339),
	}
	if v, ok := v.(volume.DetailedVolume); ok {
		tv.Labels = v.Labels()
		tv.Options = v.Options()
		tv.Scope = v.Scope()
	}

	return tv
}

// Len returns the number of mounts. Used in sorting.
func (m mounts) Len() int {
	return len(m)
}

// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m mounts) Less(i, j int) bool {
	return m.parts(i) < m.parts(j)
}

// Swap swaps two items in a slice of mounts. Used in sorting.
func (m mounts) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m mounts) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}
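
// A minimal sketch of how this sort interface is used (the slice literal below
// is hypothetical, not taken from this file): sorting orders mounts with fewer
// path components first, so parent destinations come before nested ones.
//
//	ms := mounts{
//		{Destination: "/foo/bar/baz"},
//		{Destination: "/foo"},
//	}
//	sort.Sort(ms) // ms is now ordered "/foo", "/foo/bar/baz"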

// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount at each final destination:
//
// 1. Select the previously configured mount points for the container, if any.
// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}
	parser := volume.NewParser(container.OS)
	defer func() {
		// clean up the container mount points if we return with an error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Dereference(m.Volume, container.ID)
			}
		}
	}()

	dereferenceIfExists := func(destination string) {
		if v, ok := mountPoints[destination]; ok {
			logrus.Debugf("Duplicate mount point '%s'", destination)
			if v.Volume != nil {
				daemon.volumes.Dereference(v.Volume, container.ID)
			}
		}
	}

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := parser.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Type:        m.Type,
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && parser.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}
			dereferenceIfExists(cp.Destination)
			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}
		needsSlavePropagation, err := daemon.validateBindDaemonRoot(bind.Spec)
		if err != nil {
			return err
		}
		if needsSlavePropagation {
			bind.Propagation = mount.PropagationRSlave
		}

		// #10618
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return duplicateMountPointError(bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name may refer to an already existing volume; if so, use that volume's actual driver
			bind.Driver = v.DriverName()
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		dereferenceIfExists(bind.Destination)
		mountPoints[bind.Destination] = bind
	}

	for _, cfg := range hostConfig.Mounts {
		mp, err := parser.ParseMountSpec(cfg)
		if err != nil {
			return validationError{err}
		}
		needsSlavePropagation, err := daemon.validateBindDaemonRoot(mp.Spec)
		if err != nil {
			return err
		}
		if needsSlavePropagation {
			mp.Propagation = mount.PropagationRSlave
		}

		if binds[mp.Destination] {
			return duplicateMountPointError(cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v volume.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
			} else {
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
			}
			if err != nil {
				return err
			}

			mp.Volume = v
			mp.Name = v.Name()
			mp.Driver = v.DriverName()

			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
			if cv, ok := v.(interface {
				CachedPath() string
			}); ok {
				mp.Source = cv.CachedPath()
			}
			if mp.Driver == volume.DefaultDriverName {
				setBindModeIfNull(mp)
			}
		}

		binds[mp.Destination] = true
		dereferenceIfExists(mp.Destination)
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Cleanup old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if parser.IsBackwardCompatible(m) {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
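
// A minimal sketch of the precedence described above (the container and host
// config values are hypothetical): a bind mount in hostConfig.Binds replaces a
// mount inherited through VolumesFrom at the same destination, and the
// displaced volume reference is dereferenced.
//
//	hostConfig := &containertypes.HostConfig{
//		VolumesFrom: []string{"other-container:ro"},  // may contribute /data
//		Binds:       []string{"/host/data:/data:rw"}, // wins for /data
//	}
//	err := daemon.registerMountPoints(ctr, hostConfig)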

// lazyInitializeVolume initializes a mountpoint's volume if needed.
// This happens after a daemon restart.
func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
	if len(m.Driver) > 0 && m.Volume == nil {
		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
		if err != nil {
			return err
		}
		m.Volume = v
	}
	return nil
}
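
// A minimal usage sketch (the surrounding loop is hypothetical): after a daemon
// restart, mount points are restored from disk without live volume references,
// so callers re-resolve each one before mounting.
//
//	for _, m := range ctr.MountPoints {
//		if err := daemon.lazyInitializeVolume(ctr.ID, m); err != nil {
//			return err
//		}
//	}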

// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
// mount configurations.
// The container lock should not be held when calling this function.
// Changes are made in-memory only and may also modify containers referenced
// by `container.HostConfig.VolumesFrom`.
func (daemon *Daemon) backportMountSpec(container *container.Container) {
	container.Lock()
	defer container.Unlock()

	parser := volume.NewParser(container.OS)

	maybeUpdate := make(map[string]bool)
	for _, mp := range container.MountPoints {
		if mp.Spec.Source != "" && mp.Type != "" {
			continue
		}
		maybeUpdate[mp.Destination] = true
	}
	if len(maybeUpdate) == 0 {
		return
	}

	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
	for _, m := range container.HostConfig.Mounts {
		mountSpecs[m.Target] = true
	}

	binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds))
	for _, rawSpec := range container.HostConfig.Binds {
		mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
		if err != nil {
			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
			continue
		}
		binds[mp.Destination] = mp
	}

	volumesFrom := make(map[string]volume.MountPoint)
	for _, fromSpec := range container.HostConfig.VolumesFrom {
		from, _, err := parser.ParseVolumesFrom(fromSpec)
		if err != nil {
			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
			continue
		}
		fromC, err := daemon.GetContainer(from)
		if err != nil {
			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
			continue
		}

		// make sure the source container's specs have been backported
		daemon.backportMountSpec(fromC)

		fromC.Lock()
		for t, mp := range fromC.MountPoints {
			volumesFrom[t] = *mp
		}
		fromC.Unlock()
	}

	needsUpdate := func(containerMount, other *volume.MountPoint) bool {
		if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) {
			return true
		}
		return false
	}

	// main pass: backport the spec for each mount point that still needs it
	for _, cm := range container.MountPoints {
		if !maybeUpdate[cm.Destination] {
			continue
		}
		// nothing to backport if the mount came from hostConfig.Mounts
		if mountSpecs[cm.Destination] {
			continue
		}

		if mp, exists := binds[cm.Destination]; exists {
			if needsUpdate(cm, mp) {
				cm.Spec = mp.Spec
				cm.Type = mp.Type
			}
			continue
		}

		if cm.Name != "" {
			if mp, exists := volumesFrom[cm.Destination]; exists {
				if needsUpdate(cm, &mp) {
					cm.Spec = mp.Spec
					cm.Type = mp.Type
				}
				continue
			}

			if cm.Type != "" {
				// probably specified via hostConfig.Mounts
				continue
			}

			// anon volume
			cm.Type = mounttypes.TypeVolume
			cm.Spec.Type = mounttypes.TypeVolume
		} else {
			if cm.Type != "" {
				// already updated
				continue
			}

			cm.Type = mounttypes.TypeBind
			cm.Spec.Type = mounttypes.TypeBind
			cm.Spec.Source = cm.Source
			if cm.Propagation != "" {
				cm.Spec.BindOptions = &mounttypes.BindOptions{
					Propagation: cm.Propagation,
				}
			}
		}

		cm.Spec.Target = cm.Destination
		cm.Spec.ReadOnly = !cm.RW
	}
}
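
// A sketch of the backport result for two hypothetical legacy mount points: an
// anonymous volume gains Type and Spec fields, while a bind mount also has its
// source and read-only flag recorded in the spec.
//
//	// before: {Name: "abc123", Destination: "/data", RW: true}
//	// after:  Type=volume, Spec={Type: volume, Target: "/data", ReadOnly: false}
//
//	// before: {Source: "/host/dir", Destination: "/mnt", RW: false}
//	// after:  Type=bind, Spec={Type: bind, Source: "/host/dir", Target: "/mnt", ReadOnly: true}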

func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error {
	localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName)
	if err != nil {
		return fmt.Errorf("can't retrieve local volume driver: %v", err)
	}
	vols, err := localVolumeDriver.List()
	if err != nil {
		return fmt.Errorf("can't retrieve local volumes: %v", err)
	}

	for _, v := range vols {
		name := v.Name()
		vol, err := daemon.volumes.Get(name)
		if err != nil {
			logrus.Warnf("failed to retrieve volume %s from store: %v", name, err)
		} else {
			// daemon.volumes.Get will return a DetailedVolume
			v = vol
		}

		err = fn(v)
		if err != nil {
			return err
		}
	}

	return nil
}
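
// A minimal usage sketch (the callback body is hypothetical): walk every volume
// owned by the local driver and act on each one, stopping at the first error.
//
//	err := daemon.traverseLocalVolumes(func(v volume.Volume) error {
//		logrus.Debugf("local volume: %s", v.Name())
//		return nil
//	})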