github.com/rhatdan/docker@v0.7.7-0.20180119204836-47a0dcbcd20a/daemon/volumes.go

package daemon

import (
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	mounttypes "github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/container"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/drivers"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// ErrVolumeReadonly is used to signal an error when trying to copy data into
	// a volume mount that is not writable.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)

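// mounts implements sort.Interface for []container.Mount, ordering mount
// points by the depth of their destination paths so that parents sort before
// their children, e.g.:
//
//	sort.Sort(mounts(containerMounts))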
type mounts []container.Mount

// volumeToAPIType converts a volume.Volume to the type used by the Engine API
func volumeToAPIType(v volume.Volume) *types.Volume {
	createdAt, _ := v.CreatedAt()
	tv := &types.Volume{
		Name:      v.Name(),
		Driver:    v.DriverName(),
		CreatedAt: createdAt.Format(time.RFC3339),
	}
	if v, ok := v.(volume.DetailedVolume); ok {
		tv.Labels = v.Labels()
		tv.Options = v.Options()
		tv.Scope = v.Scope()
	}

	return tv
}

// Len returns the number of mounts. Used in sorting.
func (m mounts) Len() int {
	return len(m)
}

// Less reports whether the mount at index i has fewer path parts ("a/b/c" has
// 3 parts) than the mount at index j. Used in sorting.
func (m mounts) Less(i, j int) bool {
	return m.parts(i) < m.parts(j)
}

// Swap swaps two items in a slice of mounts. Used in sorting.
func (m mounts) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// parts returns the number of parts in the destination of a mount. Used in sorting.
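// For example, "/var/lib/docker" cleans to itself and contains 3 separators,
// so it sorts after "/var", which contains 1.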
func (m mounts) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}

// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the container, if any.
// 2. Select the volumes mounted from other containers. These override previously configured mount point destinations.
// 3. Select the bind mounts set by the client. These override previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}
	parser := volume.NewParser(container.OS)
	defer func() {
		// clean up the container mount points if we are returning with an error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Dereference(m.Volume, container.ID)
			}
		}
	}()

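	// dereferenceIfExists releases the volume reference held by a mount point
	// already registered at destination, so that it can be replaced.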
	dereferenceIfExists := func(destination string) {
		if v, ok := mountPoints[destination]; ok {
			logrus.Debugf("Duplicate mount point '%s'", destination)
			if v.Volume != nil {
				daemon.volumes.Dereference(v.Volume, container.ID)
			}
		}
	}

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := parser.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Type:        m.Type,
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && parser.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}
			dereferenceIfExists(cp.Destination)
			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		// reject a destination that is already used by another bind
		// or by a tmpfs mount (#10618)
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return duplicateMountPointError(bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name may refer to a volume that already exists, so use the
			// driver of the volume we actually got back
			bind.Driver = v.DriverName()
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		dereferenceIfExists(bind.Destination)
		mountPoints[bind.Destination] = bind
	}

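	// Read mount specs set via HostConfig.Mounts (the --mount API); like
	// binds, these override previously configured destinations.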
	for _, cfg := range hostConfig.Mounts {
		mp, err := parser.ParseMountSpec(cfg)
		if err != nil {
			return errdefs.InvalidParameter(err)
		}

		if binds[mp.Destination] {
			return duplicateMountPointError(cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v volume.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
			} else {
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
			}
			if err != nil {
				return err
			}

			mp.Volume = v
			mp.Name = v.Name()
			mp.Driver = v.DriverName()

			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
			if cv, ok := v.(interface {
				CachedPath() string
			}); ok {
				mp.Source = cv.CachedPath()
			}
			if mp.Driver == volume.DefaultDriverName {
				setBindModeIfNull(mp)
			}
		}

		binds[mp.Destination] = true
		dereferenceIfExists(mp.Destination)
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Cleanup old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if parser.IsBackwardCompatible(m) {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}

// lazyInitializeVolume initializes a mountpoint's volume if needed.
// This happens after a daemon restart.
func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
	if len(m.Driver) > 0 && m.Volume == nil {
		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
		if err != nil {
			return err
		}
		m.Volume = v
	}
	return nil
}

// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
// mount configurations.
// The container lock should not be held when calling this function.
// Changes are made in-memory only, but may also touch containers referenced
// by `container.HostConfig.VolumesFrom`.
func (daemon *Daemon) backportMountSpec(container *container.Container) {
	container.Lock()
	defer container.Unlock()

	parser := volume.NewParser(container.OS)

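	// Collect destinations whose stored mount points lack a spec source or
	// type, i.e. were written by a pre-1.13 daemon.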
	maybeUpdate := make(map[string]bool)
	for _, mp := range container.MountPoints {
		if mp.Spec.Source != "" && mp.Type != "" {
			continue
		}
		maybeUpdate[mp.Destination] = true
	}
	if len(maybeUpdate) == 0 {
		return
	}

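	// Destinations configured through HostConfig.Mounts already carry full
	// specs and need no backport.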
	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
	for _, m := range container.HostConfig.Mounts {
		mountSpecs[m.Target] = true
	}

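	// Re-parse the raw binds to recover the mount specs they imply.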
	binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds))
	for _, rawSpec := range container.HostConfig.Binds {
		mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
		if err != nil {
			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
			continue
		}
		binds[mp.Destination] = mp
	}

	volumesFrom := make(map[string]volume.MountPoint)
	for _, fromSpec := range container.HostConfig.VolumesFrom {
		from, _, err := parser.ParseVolumesFrom(fromSpec)
		if err != nil {
			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
			continue
		}
		fromC, err := daemon.GetContainer(from)
		if err != nil {
			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
			continue
		}

		// make sure the source container's specs have been backported
		daemon.backportMountSpec(fromC)

		fromC.Lock()
		for t, mp := range fromC.MountPoints {
			volumesFrom[t] = *mp
		}
		fromC.Unlock()
	}

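	// needsUpdate reports whether the stored mount point's type or spec
	// differs from the re-parsed one.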
	needsUpdate := func(containerMount, other *volume.MountPoint) bool {
		return containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec)
	}

	// Apply the recovered specs to the container's mount points.
	for _, cm := range container.MountPoints {
		if !maybeUpdate[cm.Destination] {
			continue
		}
		// nothing to backport if from hostconfig.Mounts
		if mountSpecs[cm.Destination] {
			continue
		}

		if mp, exists := binds[cm.Destination]; exists {
			if needsUpdate(cm, mp) {
				cm.Spec = mp.Spec
				cm.Type = mp.Type
			}
			continue
		}

		if cm.Name != "" {
			if mp, exists := volumesFrom[cm.Destination]; exists {
				if needsUpdate(cm, &mp) {
					cm.Spec = mp.Spec
					cm.Type = mp.Type
				}
				continue
			}

			if cm.Type != "" {
				// probably specified via HostConfig.Mounts
				continue
			}

			// anonymous volume
			cm.Type = mounttypes.TypeVolume
			cm.Spec.Type = mounttypes.TypeVolume
		} else {
			if cm.Type != "" {
				// already updated
				continue
			}

			cm.Type = mounttypes.TypeBind
			cm.Spec.Type = mounttypes.TypeBind
			cm.Spec.Source = cm.Source
			if cm.Propagation != "" {
				cm.Spec.BindOptions = &mounttypes.BindOptions{
					Propagation: cm.Propagation,
				}
			}
		}

		cm.Spec.Target = cm.Destination
		cm.Spec.ReadOnly = !cm.RW
	}
}

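// traverseLocalVolumes calls fn for every volume registered with the local
// volume driver, stopping at the first error that fn returns.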
func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error {
	localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName)
	if err != nil {
		return fmt.Errorf("can't retrieve local volume driver: %v", err)
	}
	vols, err := localVolumeDriver.List()
	if err != nil {
		return fmt.Errorf("can't retrieve local volumes: %v", err)
	}

	for _, v := range vols {
		name := v.Name()
		vol, err := daemon.volumes.Get(name)
		if err != nil {
			logrus.Warnf("failed to retrieve volume %s from store: %v", name, err)
		} else {
			// daemon.volumes.Get returns a DetailedVolume, which carries
			// labels, options, and scope
			v = vol
		}

		err = fn(v)
		if err != nil {
			return err
		}
	}

	return nil
}