github.com/jiasir/docker@v1.3.3-0.20170609024000-252e610103e7/daemon/volumes.go (about)

     1  package daemon
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"os"
     7  	"path/filepath"
     8  	"reflect"
     9  	"strings"
    10  	"time"
    11  
    12  	"github.com/Sirupsen/logrus"
    13  	dockererrors "github.com/docker/docker/api/errors"
    14  	"github.com/docker/docker/api/types"
    15  	containertypes "github.com/docker/docker/api/types/container"
    16  	mounttypes "github.com/docker/docker/api/types/mount"
    17  	"github.com/docker/docker/container"
    18  	"github.com/docker/docker/volume"
    19  	"github.com/docker/docker/volume/drivers"
    20  )
    21  
var (
	// ErrVolumeReadonly is a sentinel error used to signal an error when
	// trying to copy data into a volume mount that is not writable.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)
    27  
// mounts is a collection of container mount points that sorts by the depth of
// each mount's destination path (see Len, Less, Swap and parts).
type mounts []container.Mount
    29  
    30  // volumeToAPIType converts a volume.Volume to the type used by the Engine API
    31  func volumeToAPIType(v volume.Volume) *types.Volume {
    32  	createdAt, _ := v.CreatedAt()
    33  	tv := &types.Volume{
    34  		Name:      v.Name(),
    35  		Driver:    v.DriverName(),
    36  		CreatedAt: createdAt.Format(time.RFC3339),
    37  	}
    38  	if v, ok := v.(volume.DetailedVolume); ok {
    39  		tv.Labels = v.Labels()
    40  		tv.Options = v.Options()
    41  		tv.Scope = v.Scope()
    42  	}
    43  
    44  	return tv
    45  }
    46  
// Len returns the number of mounts. Part of sort.Interface; used in sorting.
func (m mounts) Len() int {
	return len(m)
}
    51  
    52  // Less returns true if the number of parts (a/b/c would be 3 parts) in the
    53  // mount indexed by parameter 1 is less than that of the mount indexed by
    54  // parameter 2. Used in sorting.
    55  func (m mounts) Less(i, j int) bool {
    56  	return m.parts(i) < m.parts(j)
    57  }
    58  
    59  // Swap swaps two items in an array of mounts. Used in sorting
    60  func (m mounts) Swap(i, j int) {
    61  	m[i], m[j] = m[j], m[i]
    62  }
    63  
    64  // parts returns the number of parts in the destination of a mount. Used in sorting.
    65  func (m mounts) parts(i int) int {
    66  	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
    67  }
    68  
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows the next sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
	// binds records destinations claimed by hostConfig.Binds and
	// hostConfig.Mounts so duplicate targets can be rejected with an error.
	binds := map[string]bool{}
	// mountPoints is the working set that replaces container.MountPoints
	// once all four sources have been merged.
	mountPoints := map[string]*volume.MountPoint{}
	defer func() {
		// clean up the container mountpoints once return with error
		if retErr != nil {
			for _, m := range mountPoints {
				if m.Volume == nil {
					continue
				}
				daemon.volumes.Dereference(m.Volume, container.ID)
			}
		}
	}()

	// dereferenceIfExists releases the volume reference held by an earlier
	// entry that is about to be overridden at the same destination.
	dereferenceIfExists := func(destination string) {
		if v, ok := mountPoints[destination]; ok {
			logrus.Debugf("Duplicate mount point '%s'", destination)
			if v.Volume != nil {
				daemon.volumes.Dereference(v.Volume, container.ID)
			}
		}
	}

	// 1. Read already configured mount points.
	for destination, point := range container.MountPoints {
		mountPoints[destination] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := volume.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.GetContainer(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			// Copy the source container's mount point; the copy is
			// writable only if both the source mount and the requested
			// volumes-from mode allow writes.
			cp := &volume.MountPoint{
				Type:        m.Type,
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && volume.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
				Propagation: m.Propagation,
				Spec:        m.Spec,
				CopyData:    false,
			}

			// An empty source means a named volume: resolve it and take
			// a reference on behalf of this container.
			if len(cp.Source) == 0 {
				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
				if err != nil {
					return err
				}
				cp.Volume = v
			}
			dereferenceIfExists(cp.Destination)
			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		// #10618
		// Reject a bind whose destination collides with another bind or
		// a tmpfs mount from the same host config.
		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
		if binds[bind.Destination] || tmpfsExists {
			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
		}

		if bind.Type == mounttypes.TypeVolume {
			// create the volume
			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name is an already existing volume, we need to use that here
			bind.Driver = v.DriverName()
			if bind.Driver == volume.DefaultDriverName {
				setBindModeIfNull(bind)
			}
		}

		binds[bind.Destination] = true
		dereferenceIfExists(bind.Destination)
		mountPoints[bind.Destination] = bind
	}

	// Read the typed mount specs (API 1.13+). Parse failures are reported
	// as bad requests since the spec comes straight from the client.
	for _, cfg := range hostConfig.Mounts {
		mp, err := volume.ParseMountSpec(cfg)
		if err != nil {
			return dockererrors.NewBadRequestError(err)
		}

		if binds[mp.Destination] {
			return fmt.Errorf("Duplicate mount point '%s'", cfg.Target)
		}

		if mp.Type == mounttypes.TypeVolume {
			var v volume.Volume
			if cfg.VolumeOptions != nil {
				var driverOpts map[string]string
				if cfg.VolumeOptions.DriverConfig != nil {
					driverOpts = cfg.VolumeOptions.DriverConfig.Options
				}
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels)
			} else {
				v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil)
			}
			if err != nil {
				return err
			}

			// Record the resolved name/driver, which may differ from the
			// requested ones (e.g. for anonymous volumes).
			mp.Volume = v
			mp.Name = v.Name()
			mp.Driver = v.DriverName()

			// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
			if cv, ok := v.(interface {
				CachedPath() string
			}); ok {
				mp.Source = cv.CachedPath()
			}
		}

		binds[mp.Destination] = true
		dereferenceIfExists(mp.Destination)
		mountPoints[mp.Destination] = mp
	}

	container.Lock()

	// 4. Cleanup old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if m.BackwardsCompatible() {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Dereference(mp.Volume, container.ID)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
   233  
   234  // lazyInitializeVolume initializes a mountpoint's volume if needed.
   235  // This happens after a daemon restart.
   236  func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
   237  	if len(m.Driver) > 0 && m.Volume == nil {
   238  		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
   239  		if err != nil {
   240  			return err
   241  		}
   242  		m.Volume = v
   243  	}
   244  	return nil
   245  }
   246  
// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13
// mount configurations
// The container lock should not be held when calling this function.
// Changes are only made in-memory and may make changes to containers referenced
// by `container.HostConfig.VolumesFrom`
func (daemon *Daemon) backportMountSpec(container *container.Container) {
	container.Lock()
	defer container.Unlock()

	// Collect destinations whose mount points predate typed specs: they
	// have no recorded spec source and no type.
	maybeUpdate := make(map[string]bool)
	for _, mp := range container.MountPoints {
		if mp.Spec.Source != "" && mp.Type != "" {
			continue
		}
		maybeUpdate[mp.Destination] = true
	}
	if len(maybeUpdate) == 0 {
		return
	}

	// Destinations configured through hostConfig.Mounts already carry a
	// full spec and never need backporting.
	mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts))
	for _, m := range container.HostConfig.Mounts {
		mountSpecs[m.Target] = true
	}

	// Re-parse the raw bind strings so their parsed specs can be copied
	// onto matching container mount points below.
	binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds))
	for _, rawSpec := range container.HostConfig.Binds {
		mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
		if err != nil {
			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
			continue
		}
		binds[mp.Destination] = mp
	}

	// Gather mount points inherited through volumes-from; each source
	// container is backported first (recursively) so its specs are
	// already resolved, and is locked only while its map is copied.
	volumesFrom := make(map[string]volume.MountPoint)
	for _, fromSpec := range container.HostConfig.VolumesFrom {
		from, _, err := volume.ParseVolumesFrom(fromSpec)
		if err != nil {
			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
			continue
		}
		fromC, err := daemon.GetContainer(from)
		if err != nil {
			logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container")
			continue
		}

		// make sure from container's specs have been backported
		daemon.backportMountSpec(fromC)

		fromC.Lock()
		for t, mp := range fromC.MountPoints {
			volumesFrom[t] = *mp
		}
		fromC.Unlock()
	}

	// needsUpdate reports whether the container's mount point disagrees
	// with the candidate in type or spec contents.
	needsUpdate := func(containerMount, other *volume.MountPoint) bool {
		if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) {
			return true
		}
		return false
	}

	// main
	for _, cm := range container.MountPoints {
		if !maybeUpdate[cm.Destination] {
			continue
		}
		// nothing to backport if from hostconfig.Mounts
		if mountSpecs[cm.Destination] {
			continue
		}

		// Prefer the spec re-parsed from the raw bind string.
		if mp, exists := binds[cm.Destination]; exists {
			if needsUpdate(cm, mp) {
				cm.Spec = mp.Spec
				cm.Type = mp.Type
			}
			continue
		}

		if cm.Name != "" {
			// Named volume: next try a spec inherited via volumes-from.
			if mp, exists := volumesFrom[cm.Destination]; exists {
				if needsUpdate(cm, &mp) {
					cm.Spec = mp.Spec
					cm.Type = mp.Type
				}
				continue
			}

			if cm.Type != "" {
				// probably specified via the hostconfig.Mounts
				continue
			}

			// anon volume
			cm.Type = mounttypes.TypeVolume
			cm.Spec.Type = mounttypes.TypeVolume
		} else {
			if cm.Type != "" {
				// already updated
				continue
			}

			// Nameless mount point: treat as a host-path bind and
			// rebuild its spec from the recorded source/propagation.
			cm.Type = mounttypes.TypeBind
			cm.Spec.Type = mounttypes.TypeBind
			cm.Spec.Source = cm.Source
			if cm.Propagation != "" {
				cm.Spec.BindOptions = &mounttypes.BindOptions{
					Propagation: cm.Propagation,
				}
			}
		}

		cm.Spec.Target = cm.Destination
		cm.Spec.ReadOnly = !cm.RW
	}
}
   367  
   368  func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error {
   369  	localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName)
   370  	if err != nil {
   371  		return fmt.Errorf("can't retrieve local volume driver: %v", err)
   372  	}
   373  	vols, err := localVolumeDriver.List()
   374  	if err != nil {
   375  		return fmt.Errorf("can't retrieve local volumes: %v", err)
   376  	}
   377  
   378  	for _, v := range vols {
   379  		name := v.Name()
   380  		vol, err := daemon.volumes.Get(name)
   381  		if err != nil {
   382  			logrus.Warnf("failed to retrieve volume %s from store: %v", name, err)
   383  		} else {
   384  			// daemon.volumes.Get will return DetailedVolume
   385  			v = vol
   386  		}
   387  
   388  		err = fn(v)
   389  		if err != nil {
   390  			return err
   391  		}
   392  	}
   393  
   394  	return nil
   395  }