github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/mm/io.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/hostarch"
	"github.com/SagerNet/gvisor/pkg/safemem"
	"github.com/SagerNet/gvisor/pkg/sentry/platform"
	"github.com/SagerNet/gvisor/pkg/syserror"
	"github.com/SagerNet/gvisor/pkg/usermem"
)

// There are two supported ways to copy data to/from application virtual
// memory:
//
// 1. Internally-mapped copying: Determine the platform.File that backs the
// copied-to/from virtual address, obtain a mapping of its pages, and read or
// write to the mapping.
//
// 2. AddressSpace copying: If platform.Platform.SupportsAddressSpaceIO() is
// true, AddressSpace permissions are applicable, and an AddressSpace is
// available, copy directly through the AddressSpace, handling faults as
// needed.
//
// (Given that internally-mapped copying requires that backing memory is always
// implemented using a host file descriptor, we could also preadv/pwritev to it
// instead. But this would incur a host syscall for each use of the mapped
// page, whereas mmap is a one-time cost.)
//
// The fixed overhead of internally-mapped copying is expected to be higher
// than that of AddressSpace copying since the former always needs to translate
// addresses, whereas the latter only needs to do so when faults occur.
// However, the throughput of internally-mapped copying is expected to be
// somewhat higher than that of AddressSpace copying due to the high cost of
// page faults and because implementations of the latter usually rely on
// safecopy, which doesn't use AVX registers. So we prefer to use AddressSpace
// copying (when available) for smaller copies, and switch to internally-mapped
// copying once a size threshold is exceeded.
const (
	// copyMapMinBytes is the size threshold for switching to internally-mapped
	// copying in CopyOut, CopyIn, and ZeroOut.
	copyMapMinBytes = 32 << 10 // 32 KB

	// rwMapMinBytes is the size threshold for switching to internally-mapped
	// copying in CopyOutFrom and CopyInTo. It's lower than copyMapMinBytes
	// since AddressSpace copying in this case requires additional buffering;
	// see CopyOutFrom for details.
	rwMapMinBytes = 512
)
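
// Illustrative sketch (not part of this file): callers usually reach these
// methods through the usermem.IO interface, which *MemoryManager implements.
// For example, a copy out to application memory might look like:
//
//	var uio usermem.IO = mm
//	n, err := uio.CopyOut(ctx, addr, data, usermem.IOOpts{AddressSpaceActive: true})
//
// Whether the copy then goes through the AddressSpace or through internal
// mappings is decided by the thresholds above.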

// CheckIORange is similar to hostarch.Addr.ToRange, but applies bounds checks
// consistent with Linux's arch/x86/include/asm/uaccess.h:access_ok().
//
// Preconditions: length >= 0.
func (mm *MemoryManager) CheckIORange(addr hostarch.Addr, length int64) (hostarch.AddrRange, bool) {
	// Note that access_ok() constrains end even if length == 0.
	ar, ok := addr.ToRange(uint64(length))
	return ar, (ok && ar.End <= mm.layout.MaxAddr)
}
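
// For illustration (hypothetical layout): with mm.layout.MaxAddr ==
// 0x0000800000000000, CheckIORange(0x00007ffffffff000, 0x2000) returns
// ok == false because ar.End would exceed MaxAddr, even though the addition
// itself does not overflow; a length that would wrap the address space is
// already rejected by addr.ToRange.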

// checkIOVec applies bounds checks consistent with Linux's
// arch/x86/include/asm/uaccess.h:access_ok() to ars.
func (mm *MemoryManager) checkIOVec(ars hostarch.AddrRangeSeq) bool {
	for !ars.IsEmpty() {
		ar := ars.Head()
		if _, ok := mm.CheckIORange(ar.Start, int64(ar.Length())); !ok {
			return false
		}
		ars = ars.Tail()
	}
	return true
}

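// asioEnabled returns whether AddressSpace I/O may be used for an operation
// with the given options: the platform must support AddressSpace I/O, the
// operation must not ignore permission checks (AddressSpace copies are
// subject to the mapped permissions), and an AddressSpace must currently be
// active.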
func (mm *MemoryManager) asioEnabled(opts usermem.IOOpts) bool {
	return mm.haveASIO && !opts.IgnorePermissions && opts.AddressSpaceActive
}

// translateIOError converts errors to EFAULT, which is how Linux usually
// reports I/O errors originating from the MM.
func translateIOError(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	if logIOErrors {
		ctx.Debugf("MM I/O error: %v", err)
	}
	return syserror.EFAULT
}

// CopyOut implements usermem.IO.CopyOut.
func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
	ar, ok := mm.CheckIORange(addr, int64(len(src)))
	if !ok {
		return 0, syserror.EFAULT
	}

	if len(src) == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && len(src) < copyMapMinBytes {
		return mm.asCopyOut(ctx, addr, src)
	}

	// Go through internal mappings.
	n64, err := mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		n, err := safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src)))
		return n, translateIOError(ctx, err)
	})
	return int(n64), err
}

func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src []byte) (int, error) {
	var done int
	for {
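		// Copy as much as possible in one call; done tracks how many bytes
		// have been written so far, so that after a fault is repaired the
		// copy resumes at the first unwritten byte.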
		n, err := mm.as.CopyOut(addr+hostarch.Addr(done), src[done:])
		done += n
		if err == nil {
			return done, nil
		}
		if f, ok := err.(platform.SegmentationFault); ok {
			ar, _ := addr.ToRange(uint64(len(src)))
			if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
				return done, err
			}
			continue
		}
		return done, translateIOError(ctx, err)
	}
}

// CopyIn implements usermem.IO.CopyIn.
func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
	ar, ok := mm.CheckIORange(addr, int64(len(dst)))
	if !ok {
		return 0, syserror.EFAULT
	}

	if len(dst) == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && len(dst) < copyMapMinBytes {
		return mm.asCopyIn(ctx, addr, dst)
	}

	// Go through internal mappings.
	n64, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		n, err := safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims)
		return n, translateIOError(ctx, err)
	})
	return int(n64), err
}

func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst []byte) (int, error) {
	var done int
	for {
		n, err := mm.as.CopyIn(addr+hostarch.Addr(done), dst[done:])
		done += n
		if err == nil {
			return done, nil
		}
		if f, ok := err.(platform.SegmentationFault); ok {
			ar, _ := addr.ToRange(uint64(len(dst)))
			if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
				return done, err
			}
			continue
		}
		return done, translateIOError(ctx, err)
	}
}

// ZeroOut implements usermem.IO.ZeroOut.
func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
	ar, ok := mm.CheckIORange(addr, toZero)
	if !ok {
		return 0, syserror.EFAULT
	}

	if toZero == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && toZero < copyMapMinBytes {
		return mm.asZeroOut(ctx, addr, toZero)
	}

	// Go through internal mappings.
	return mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
		n, err := safemem.ZeroSeq(dsts)
		return n, translateIOError(ctx, err)
	})
}

func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64) (int64, error) {
	var done int64
	for {
		n, err := mm.as.ZeroOut(addr+hostarch.Addr(done), uintptr(toZero-done))
		done += int64(n)
		if err == nil {
			return done, nil
		}
		if f, ok := err.(platform.SegmentationFault); ok {
			ar, _ := addr.ToRange(uint64(toZero))
			if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
				return done, err
			}
			continue
		}
		return done, translateIOError(ctx, err)
	}
}

// CopyOutFrom implements usermem.IO.CopyOutFrom.
func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
	if !mm.checkIOVec(ars) {
		return 0, syserror.EFAULT
	}

	if ars.NumBytes() == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && ars.NumBytes() < rwMapMinBytes {
		// We have to introduce a buffered copy, instead of just passing a
		// safemem.BlockSeq representing addresses in the AddressSpace to src.
		// This is because usermem.IO.CopyOutFrom() guarantees that it calls
		// src.ReadToBlocks() at most once, which is incompatible with handling
		// faults between calls. In the future, this is probably best resolved
		// by introducing a CopyOutFrom variant or option that allows it to
		// call src.ReadToBlocks() any number of times.
		//
		// This issue applies to CopyInTo as well.
		buf := make([]byte, int(ars.NumBytes()))
		bufN, bufErr := src.ReadToBlocks(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)))
		var done int64
		for done < int64(bufN) {
			ar := ars.Head()
			cplen := int64(ar.Length())
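			// bufN may be less than ars.NumBytes() if src returned a short
			// read, so never copy past the bytes actually produced.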
			if cplen > int64(bufN)-done {
				cplen = int64(bufN) - done
			}
			n, err := mm.asCopyOut(ctx, ar.Start, buf[int(done):int(done+cplen)])
			done += int64(n)
			if err != nil {
				return done, err
			}
			ars = ars.Tail()
		}
		// Do not convert errors returned by src to EFAULT.
		return done, bufErr
	}

	// Go through internal mappings.
	return mm.withVecInternalMappings(ctx, ars, hostarch.Write, opts.IgnorePermissions, src.ReadToBlocks)
}

// CopyInTo implements usermem.IO.CopyInTo.
func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
	if !mm.checkIOVec(ars) {
		return 0, syserror.EFAULT
	}

	if ars.NumBytes() == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && ars.NumBytes() < rwMapMinBytes {
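		// As in CopyOutFrom, a buffered copy is needed: dst.WriteFromBlocks
		// may be called at most once, so first read the user ranges into buf
		// (repairing faults range by range), then hand buf to dst in a single
		// call.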
		buf := make([]byte, int(ars.NumBytes()))
		var done int
		var bufErr error
		for !ars.IsEmpty() {
			ar := ars.Head()
			var n int
			n, bufErr = mm.asCopyIn(ctx, ar.Start, buf[done:done+int(ar.Length())])
			done += n
			if bufErr != nil {
				break
			}
			ars = ars.Tail()
		}
		n, err := dst.WriteFromBlocks(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:done])))
		if err != nil {
			return int64(n), err
		}
		// Do not convert errors returned by dst to EFAULT.
		return int64(n), bufErr
	}

	// Go through internal mappings.
	return mm.withVecInternalMappings(ctx, ars, hostarch.Read, opts.IgnorePermissions, dst.WriteFromBlocks)
}

// SwapUint32 implements usermem.IO.SwapUint32.
func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
	ar, ok := mm.CheckIORange(addr, 4)
	if !ok {
		return 0, syserror.EFAULT
	}

	// Do AddressSpace IO if applicable.
	if mm.haveASIO && opts.AddressSpaceActive && !opts.IgnorePermissions {
		for {
			old, err := mm.as.SwapUint32(addr, new)
			if err == nil {
				return old, nil
			}
			if f, ok := err.(platform.SegmentationFault); ok {
				if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil {
					return 0, err
				}
				continue
			}
			return 0, translateIOError(ctx, err)
		}
	}

	// Go through internal mappings.
	var old uint32
	_, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
			// Atomicity is unachievable across mappings.
			return 0, syserror.EFAULT
		}
		im := ims.Head()
		var err error
		old, err = safemem.SwapUint32(im, new)
		if err != nil {
			return 0, translateIOError(ctx, err)
		}
		// Return the number of bytes read.
		return 4, nil
	})
	return old, err
}

// CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32.
func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
	ar, ok := mm.CheckIORange(addr, 4)
	if !ok {
		return 0, syserror.EFAULT
	}

	// Do AddressSpace IO if applicable.
	if mm.haveASIO && opts.AddressSpaceActive && !opts.IgnorePermissions {
		for {
			prev, err := mm.as.CompareAndSwapUint32(addr, old, new)
			if err == nil {
				return prev, nil
			}
			if f, ok := err.(platform.SegmentationFault); ok {
				if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil {
					return 0, err
				}
				continue
			}
			return 0, translateIOError(ctx, err)
		}
	}

	// Go through internal mappings.
	var prev uint32
	_, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
			// Atomicity is unachievable across mappings.
			return 0, syserror.EFAULT
		}
		im := ims.Head()
		var err error
		prev, err = safemem.CompareAndSwapUint32(im, old, new)
		if err != nil {
			return 0, translateIOError(ctx, err)
		}
		// Return the number of bytes read.
		return 4, nil
	})
	return prev, err
}

// LoadUint32 implements usermem.IO.LoadUint32.
func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) {
	ar, ok := mm.CheckIORange(addr, 4)
	if !ok {
		return 0, syserror.EFAULT
	}

	// Do AddressSpace IO if applicable.
	if mm.haveASIO && opts.AddressSpaceActive && !opts.IgnorePermissions {
		for {
			val, err := mm.as.LoadUint32(addr)
			if err == nil {
				return val, nil
			}
			if f, ok := err.(platform.SegmentationFault); ok {
				if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
					return 0, err
				}
				continue
			}
			return 0, translateIOError(ctx, err)
		}
	}

	// Go through internal mappings.
	var val uint32
	_, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
			// Atomicity is unachievable across mappings.
			return 0, syserror.EFAULT
		}
		im := ims.Head()
		var err error
		val, err = safemem.LoadUint32(im)
		if err != nil {
			return 0, translateIOError(ctx, err)
		}
		// Return the number of bytes read.
		return 4, nil
	})
	return val, err
}
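
// Illustrative sketch (not part of this file): the atomic operations above
// compose into read-modify-write loops on user memory, e.g. atomically
// setting a flag bit in a 32-bit word:
//
//	for {
//		old, err := mm.LoadUint32(ctx, addr, opts)
//		if err != nil {
//			return err
//		}
//		prev, err := mm.CompareAndSwapUint32(ctx, addr, old, old|flag, opts)
//		if err != nil {
//			return err
//		}
//		if prev == old {
//			return nil // The word was updated atomically.
//		}
//		// Another writer raced with us; retry against the updated value.
//	}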

// handleASIOFault handles a page fault at address addr for an AddressSpaceIO
// operation spanning ioar.
//
// Preconditions:
// * mm.as != nil.
// * ioar.Length() != 0.
// * ioar.Contains(addr).
func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr hostarch.Addr, ioar hostarch.AddrRange, at hostarch.AccessType) error {
	// Try to map all remaining pages in the I/O operation. This RoundUp can't
	// overflow because otherwise it would have been caught by CheckIORange.
	end, _ := ioar.End.RoundUp()
	ar := hostarch.AddrRange{addr.RoundDown(), end}

	// Don't bother trying existingPMAsLocked; in most cases, if we did have
	// existing pmas, we wouldn't have faulted.

	// Ensure that we have usable vmas. Here and below, only return early if we
	// can't map the first (faulting) page; failures to map later pages are
	// silently ignored. This maximizes partial success.
	mm.mappingMu.RLock()
	vseg, vend, err := mm.getVMAsLocked(ctx, ar, at, false)
	if vendaddr := vend.Start(); vendaddr < ar.End {
		if vendaddr <= ar.Start {
			mm.mappingMu.RUnlock()
			return translateIOError(ctx, err)
		}
		ar.End = vendaddr
	}

	// Ensure that we have usable pmas.
	mm.activeMu.Lock()
	pseg, pend, err := mm.getPMAsLocked(ctx, vseg, ar, at)
	mm.mappingMu.RUnlock()
	if pendaddr := pend.Start(); pendaddr < ar.End {
		if pendaddr <= ar.Start {
			mm.activeMu.Unlock()
			return translateIOError(ctx, err)
		}
		ar.End = pendaddr
	}

	// Downgrade to a read-lock on activeMu since we don't need to mutate pmas
	// anymore.
	mm.activeMu.DowngradeLock()

	err = mm.mapASLocked(pseg, ar, false)
	mm.activeMu.RUnlock()
	return translateIOError(ctx, err)
}

// withInternalMappings ensures that pmas exist for all addresses in ar,
// support access of type (at, ignorePermissions), and have internal mappings
// cached. It then calls f with mm.activeMu locked for reading, passing
// internal mappings for the subrange of ar for which this property holds.
//
// withInternalMappings takes a function returning uint64 since many safemem
// functions have this property, but returns an int64 since this is usually
// more useful for usermem.IO methods.
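//
// For example, CopyIn above passes (slightly simplified) a closure that
// copies from the internal mappings into a safe byte slice:
//
//	mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions,
//		func(ims safemem.BlockSeq) (uint64, error) {
//			return safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims)
//		})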
//
// Preconditions: 0 < ar.Length() <= math.MaxInt64.
func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
	// If pmas are already available, we can do IO without touching mm.vmas or
	// mm.mappingMu.
	mm.activeMu.RLock()
	if pseg := mm.existingPMAsLocked(ar, at, ignorePermissions, true /* needInternalMappings */); pseg.Ok() {
		n, err := f(mm.internalMappingsLocked(pseg, ar))
		mm.activeMu.RUnlock()
		// Do not convert errors returned by f to EFAULT.
		return int64(n), err
	}
	mm.activeMu.RUnlock()

	// Ensure that we have usable vmas.
	mm.mappingMu.RLock()
	vseg, vend, verr := mm.getVMAsLocked(ctx, ar, at, ignorePermissions)
	if vendaddr := vend.Start(); vendaddr < ar.End {
		if vendaddr <= ar.Start {
			mm.mappingMu.RUnlock()
			return 0, translateIOError(ctx, verr)
		}
		ar.End = vendaddr
	}

	// Ensure that we have usable pmas.
	mm.activeMu.Lock()
	pseg, pend, perr := mm.getPMAsLocked(ctx, vseg, ar, at)
	mm.mappingMu.RUnlock()
	if pendaddr := pend.Start(); pendaddr < ar.End {
		if pendaddr <= ar.Start {
			mm.activeMu.Unlock()
			return 0, translateIOError(ctx, perr)
		}
		ar.End = pendaddr
	}
	imend, imerr := mm.getPMAInternalMappingsLocked(pseg, ar)
	mm.activeMu.DowngradeLock()
	if imendaddr := imend.Start(); imendaddr < ar.End {
		if imendaddr <= ar.Start {
			mm.activeMu.RUnlock()
			return 0, translateIOError(ctx, imerr)
		}
		ar.End = imendaddr
	}

	// Do I/O.
	un, err := f(mm.internalMappingsLocked(pseg, ar))
	mm.activeMu.RUnlock()
	n := int64(un)

	// Return the first error in order of progress through ar.
	if err != nil {
		// Do not convert errors returned by f to EFAULT.
		return n, err
	}
	if imerr != nil {
		return n, translateIOError(ctx, imerr)
	}
	if perr != nil {
		return n, translateIOError(ctx, perr)
	}
	return n, translateIOError(ctx, verr)
}

// withVecInternalMappings ensures that pmas exist for all addresses in ars,
// support access of type (at, ignorePermissions), and have internal mappings
// cached. It then calls f with mm.activeMu locked for reading, passing
// internal mappings for the subset of ars for which this property holds.
//
// Preconditions: !ars.IsEmpty().
func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
	// withInternalMappings is faster than withVecInternalMappings because of
	// iterator plumbing (this isn't generally practical in the vector case due
	// to iterator invalidation between AddrRanges). Use it if possible.
	if ars.NumRanges() == 1 {
		return mm.withInternalMappings(ctx, ars.Head(), at, ignorePermissions, f)
	}

	// If pmas are already available, we can do IO without touching mm.vmas or
	// mm.mappingMu.
	mm.activeMu.RLock()
	if mm.existingVecPMAsLocked(ars, at, ignorePermissions, true /* needInternalMappings */) {
		n, err := f(mm.vecInternalMappingsLocked(ars))
		mm.activeMu.RUnlock()
		// Do not convert errors returned by f to EFAULT.
		return int64(n), err
	}
	mm.activeMu.RUnlock()

	// Ensure that we have usable vmas.
	mm.mappingMu.RLock()
	vars, verr := mm.getVecVMAsLocked(ctx, ars, at, ignorePermissions)
	if vars.NumBytes() == 0 {
		mm.mappingMu.RUnlock()
		return 0, translateIOError(ctx, verr)
	}

	// Ensure that we have usable pmas.
	mm.activeMu.Lock()
	pars, perr := mm.getVecPMAsLocked(ctx, vars, at)
	mm.mappingMu.RUnlock()
	if pars.NumBytes() == 0 {
		mm.activeMu.Unlock()
		return 0, translateIOError(ctx, perr)
	}
	imars, imerr := mm.getVecPMAInternalMappingsLocked(pars)
	mm.activeMu.DowngradeLock()
	if imars.NumBytes() == 0 {
		mm.activeMu.RUnlock()
		return 0, translateIOError(ctx, imerr)
	}

	// Do I/O.
	un, err := f(mm.vecInternalMappingsLocked(imars))
	mm.activeMu.RUnlock()
	n := int64(un)

	// Return the first error in order of progress through ars.
	if err != nil {
		// Do not convert errors from f to EFAULT.
		return n, err
	}
	if imerr != nil {
		return n, translateIOError(ctx, imerr)
	}
	if perr != nil {
		return n, translateIOError(ctx, perr)
	}
	return n, translateIOError(ctx, verr)
}

// truncatedAddrRangeSeq returns a copy of ars, truncated so that it ends no
// later than address end within the AddrRange arsit.Head(). It is used in
// vector I/O paths to truncate a hostarch.AddrRangeSeq when an error occurs
// partway through it.
//
// Preconditions:
// * !arsit.IsEmpty().
// * end <= arsit.Head().End.
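//
// For example (illustrative values): if ars covers [0x1000, 0x3000) followed
// by [0x5000, 0x6000), arsit is ars.Tail() (so arsit.Head() is
// [0x5000, 0x6000)), and end is 0x5800, then the result covers the full first
// range plus the first 0x800 bytes of the second, i.e.
// ars.TakeFirst64(0x2800).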
func truncatedAddrRangeSeq(ars, arsit hostarch.AddrRangeSeq, end hostarch.Addr) hostarch.AddrRangeSeq {
	ar := arsit.Head()
	if end <= ar.Start {
		return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes())
	}
	return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes() + int64(end-ar.Start))
}