github.com/docker/docker@v299999999.0.0-20200612211812-aaf470eca7b5+incompatible/daemon/volumes.go (about)

     1  package daemon // import "github.com/docker/docker/daemon"
     2  
     3  import (
     4  	"context"
     5  	"os"
     6  	"path/filepath"
     7  	"reflect"
     8  	"strings"
     9  	"time"
    10  
    11  	"github.com/docker/docker/api/types"
    12  	containertypes "github.com/docker/docker/api/types/container"
    13  	"github.com/docker/docker/api/types/mount"
    14  	mounttypes "github.com/docker/docker/api/types/mount"
    15  	"github.com/docker/docker/container"
    16  	"github.com/docker/docker/errdefs"
    17  	"github.com/docker/docker/volume"
    18  	volumemounts "github.com/docker/docker/volume/mounts"
    19  	"github.com/docker/docker/volume/service"
    20  	volumeopts "github.com/docker/docker/volume/service/opts"
    21  	"github.com/pkg/errors"
    22  	"github.com/sirupsen/logrus"
    23  )
    24  
    25  var (
    26  	// ErrVolumeReadonly is used to signal an error when trying to copy data into
    27  	// a volume mount that is not writable.
    28  	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
    29  )
    30  
    31  type mounts []container.Mount
    32  
    33  // Len returns the number of mounts. Used in sorting.
    34  func (m mounts) Len() int {
    35  	return len(m)
    36  }
    37  
    38  // Less returns true if the number of parts (a/b/c would be 3 parts) in the
    39  // mount indexed by parameter 1 is less than that of the mount indexed by
    40  // parameter 2. Used in sorting.
    41  func (m mounts) Less(i, j int) bool {
    42  	return m.parts(i) < m.parts(j)
    43  }
    44  
    45  // Swap swaps two items in an array of mounts. Used in sorting
    46  func (m mounts) Swap(i, j int) {
    47  	m[i], m[j] = m[j], m[i]
    48  }
    49  
    50  // parts returns the number of parts in the destination of a mount. Used in sorting.
    51  func (m mounts) parts(i int) int {
    52  	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
    53  }
    54  
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows the next sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
//
// HostConfig.Mounts specs are processed last and overwrite earlier entries
// for the same destination (a clash with a bind is rejected as a duplicate).
// On error, every volume reference taken during this call is released again.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volumemounts.MountPoint{}
	parser := volumemounts.NewParser(container.OS)

	ctx := context.TODO()
	defer func() {
		// Clean up the volume references taken for the container mount
		// points if we return with an error. Best-effort: the Release
		// error is intentionally ignored here.
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Release(ctx, m.Volume.Name(), container.ID)
			}
		}
	}()

	// dereferenceIfExists drops the volume reference held by an already
	// registered mount point that is about to be replaced at destination.
	dereferenceIfExists := func(destination string) {
		if v, ok := mountPoints[destination]; ok {
			logrus.Debugf("Duplicate mount point '%s'", destination)
			if v.Volume != nil {
				daemon.volumes.Release(ctx, v.Volume.Name(), container.ID)
			}
		}
	}

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := parser.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			// Copy the other container's mount point. The copy is
			// read-write only if both the source mount and the
			// volumes-from mode allow it.
			cp := &volumemounts.MountPoint{
				Type:        m.Type,
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && parser.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			// An empty Source means the mount is backed by a named volume;
			// resolve it and take a reference for this container.
			if len(cp.Source) == 0 {
				v, err := daemon.volumes.Get(ctx, cp.Name, volumeopts.WithGetDriver(cp.Driver), volumeopts.WithGetReference(container.ID))
				if err != nil {
					return err
				}
				cp.Volume = &volumeWrapper{v: v, s: daemon.volumes}
			}
			dereferenceIfExists(cp.Destination)
			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}
		needsSlavePropagation, err := daemon.validateBindDaemonRoot(bind.Spec)
		if err != nil {
			return err
		}
		if needsSlavePropagation {
			bind.Propagation = mount.PropagationRSlave
		}

		// #10618: reject duplicate destinations across binds and tmpfs mounts.
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return duplicateMountPointError(bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume (and take a reference on it for this container)
			v, err := daemon.volumes.Create(ctx, bind.Name, bind.Driver, volumeopts.WithCreateReference(container.ID))
			if err != nil {
				return err
			}
			bind.Volume = &volumeWrapper{v: v, s: daemon.volumes}
			bind.Source = v.Mountpoint
			// bind.Name is an already existing volume, we need to use that here
			bind.Driver = v.Driver
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		dereferenceIfExists(bind.Destination)
		mountPoints[bind.Destination] = bind
	}

	// Read mounts from the API-style HostConfig.Mounts specs. These are
	// processed last, so they override earlier entries for the same
	// destination; a destination already claimed by a bind is an error.
	for _, cfg := range hostConfig.Mounts {
		mp, err := parser.ParseMountSpec(cfg)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}
		needsSlavePropagation, err := daemon.validateBindDaemonRoot(mp.Spec)
		if err != nil {
			return err
		}
		if needsSlavePropagation {
			mp.Propagation = mount.PropagationRSlave
		}

		if binds[mp.Destination] {
			return duplicateMountPointError(cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v *types.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.Create(ctx,
					mp.Name,
					mp.Driver,
					volumeopts.WithCreateReference(container.ID),
					volumeopts.WithCreateOptions(driverOpts),
					volumeopts.WithCreateLabels(cfg.VolumeOptions.Labels),
				)
			} else {
				v, err = daemon.volumes.Create(ctx, mp.Name, mp.Driver, volumeopts.WithCreateReference(container.ID))
			}
			if err != nil {
				return err
			}

			mp.Volume = &volumeWrapper{v: v, s: daemon.volumes}
			mp.Name = v.Name
			mp.Driver = v.Driver

			// need to selinux-relabel local mounts
			mp.Source = v.Mountpoint
			if mp.Driver == volume.DefaultDriverName {
				setBindModeIfNull(mp)
			}
		}

		if mp.Type == mounttypes.TypeBind {
			// API bind mounts: skip auto-creating the mountpoint path on the host.
			mp.SkipMountpointCreation = true
		}

		binds[mp.Destination] = true
		dereferenceIfExists(mp.Destination)
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Cleanup old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if parser.IsBackwardCompatible(m) {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Release(ctx, mp.Volume.Name(), container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
   245  
   246  // lazyInitializeVolume initializes a mountpoint's volume if needed.
   247  // This happens after a daemon restart.
   248  func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volumemounts.MountPoint) error {
   249  	if len(m.Driver) > 0 && m.Volume == nil {
   250  		v, err := daemon.volumes.Get(context.TODO(), m.Name, volumeopts.WithGetDriver(m.Driver), volumeopts.WithGetReference(containerID))
   251  		if err != nil {
   252  			return err
   253  		}
   254  		m.Volume = &volumeWrapper{v: v, s: daemon.volumes}
   255  	}
   256  	return nil
   257  }
   258  
// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
// mount configurations.
// The container lock should not be held when calling this function (it takes
// the lock itself).
// Changes are only made in-memory and may make changes to containers referenced
// by `container.HostConfig.VolumesFrom`
func (daemon *Daemon) backportMountSpec(container *container.Container) {
	container.Lock()
	defer container.Unlock()

	parser := volumemounts.NewParser(container.OS)

	// Collect the destinations whose mount points are missing spec source or
	// type information and therefore may need backporting.
	maybeUpdate := make(map[string]bool)
	for _, mp := range container.MountPoints {
		if mp.Spec.Source != "" && mp.Type != "" {
			continue
		}
		maybeUpdate[mp.Destination] = true
	}
	if len(maybeUpdate) == 0 {
		return
	}

	// Destinations that came from HostConfig.Mounts already carry a full
	// spec and never need backporting.
	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
	for _, m := range container.HostConfig.Mounts {
		mountSpecs[m.Target] = true
	}

	// Re-parse the raw bind strings so their parsed form can be copied onto
	// matching mount points below. Parse failures are logged and skipped.
	binds := make(map[string]*volumemounts.MountPoint, len(container.HostConfig.Binds))
	for _, rawSpec := range container.HostConfig.Binds {
		mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
		if err != nil {
			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
			continue
		}
		binds[mp.Destination] = mp
	}

	// Gather mount points inherited via --volumes-from, keyed by destination.
	volumesFrom := make(map[string]volumemounts.MountPoint)
	for _, fromSpec := range container.HostConfig.VolumesFrom {
		from, _, err := parser.ParseVolumesFrom(fromSpec)
		if err != nil {
			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
			continue
		}
		fromC, err := daemon.GetContainer(from)
		if err != nil {
			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
			continue
		}

		// make sure from container's specs have been backported
		// NOTE(review): this recursive call locks fromC while this
		// container's lock is already held; presumably the VolumesFrom
		// graph is acyclic so this cannot deadlock — confirm.
		daemon.backportMountSpec(fromC)

		fromC.Lock()
		for t, mp := range fromC.MountPoints {
			volumesFrom[t] = *mp
		}
		fromC.Unlock()
	}

	// needsUpdate reports whether the container's mount point differs from
	// the freshly parsed one and should have its spec/type copied over.
	needsUpdate := func(containerMount, other *volumemounts.MountPoint) bool {
		if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) {
			return true
		}
		return false
	}

	// main backport pass over the container's mount points
	for _, cm := range container.MountPoints {
		if !maybeUpdate[cm.Destination] {
			continue
		}
		// nothing to backport if from hostconfig.Mounts
		if mountSpecs[cm.Destination] {
			continue
		}

		// Prefer the re-parsed bind spec for this destination, if any.
		if mp, exists := binds[cm.Destination]; exists {
			if needsUpdate(cm, mp) {
				cm.Spec = mp.Spec
				cm.Type = mp.Type
			}
			continue
		}

		if cm.Name != "" {
			// Named mount: try the volumes-from containers next.
			if mp, exists := volumesFrom[cm.Destination]; exists {
				if needsUpdate(cm, &mp) {
					cm.Spec = mp.Spec
					cm.Type = mp.Type
				}
				continue
			}

			if cm.Type != "" {
				// probably specified via the hostconfig.Mounts
				continue
			}

			// anon volume
			cm.Type = mounttypes.TypeVolume
			cm.Spec.Type = mounttypes.TypeVolume
		} else {
			if cm.Type != "" {
				// already updated
				continue
			}

			// Unnamed and unmatched: treat as a plain bind mount and
			// synthesize its spec from the mount point's own fields.
			cm.Type = mounttypes.TypeBind
			cm.Spec.Type = mounttypes.TypeBind
			cm.Spec.Source = cm.Source
			if cm.Propagation != "" {
				cm.Spec.BindOptions = &mounttypes.BindOptions{
					Propagation: cm.Propagation,
				}
			}
		}

		cm.Spec.Target = cm.Destination
		cm.Spec.ReadOnly = !cm.RW
	}
}
   381  
// VolumesService returns the daemon's volumes service, which is used to
// perform volume operations (create, get, mount, release, ...).
func (daemon *Daemon) VolumesService() *service.VolumesService {
	return daemon.volumes
}
   386  
// volumeMounter is the subset of the volumes service that volumeWrapper
// needs in order to mount and unmount a volume on behalf of a reference
// holder.
type volumeMounter interface {
	Mount(ctx context.Context, v *types.Volume, ref string) (string, error)
	Unmount(ctx context.Context, v *types.Volume, ref string) error
}

// volumeWrapper adapts an API volume description (*types.Volume) plus a
// volumeMounter into the object stored in MountPoint.Volume (presumably the
// volume.Volume interface — confirm against the mounts package).
type volumeWrapper struct {
	v *types.Volume // the wrapped volume description
	s volumeMounter // service used for Mount/Unmount calls
}
   396  
// Name returns the name of the wrapped volume.
func (v *volumeWrapper) Name() string {
	return v.v.Name
}

// DriverName returns the name of the driver backing the wrapped volume.
func (v *volumeWrapper) DriverName() string {
	return v.v.Driver
}

// Path returns the wrapped volume's mountpoint on the host.
func (v *volumeWrapper) Path() string {
	return v.v.Mountpoint
}

// Mount mounts the volume for the given reference and returns the mounted path.
func (v *volumeWrapper) Mount(ref string) (string, error) {
	return v.s.Mount(context.TODO(), v.v, ref)
}

// Unmount unmounts the volume for the given reference.
func (v *volumeWrapper) Unmount(ref string) error {
	return v.s.Unmount(context.TODO(), v.v, ref)
}

// CreatedAt is not implemented for wrapped API volumes; it always returns
// the zero time and an error.
func (v *volumeWrapper) CreatedAt() (time.Time, error) {
	return time.Time{}, errors.New("not implemented")
}

// Status returns the driver-specific status map of the wrapped volume.
func (v *volumeWrapper) Status() map[string]interface{} {
	return v.v.Status
}