github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/mm/vma.go

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package mm
    16  
    17  import (
    18  	"fmt"
    19  	"sync/atomic"
    20  
    21  	"github.com/metacubex/gvisor/pkg/abi/linux"
    22  	"github.com/metacubex/gvisor/pkg/context"
    23  	"github.com/metacubex/gvisor/pkg/errors/linuxerr"
    24  	"github.com/metacubex/gvisor/pkg/hostarch"
    25  	"github.com/metacubex/gvisor/pkg/sentry/arch"
    26  	"github.com/metacubex/gvisor/pkg/sentry/kernel/auth"
    27  	"github.com/metacubex/gvisor/pkg/sentry/limits"
    28  	"github.com/metacubex/gvisor/pkg/sentry/memmap"
    29  )
    30  
    31  // createVMALocked creates a vma per opts. The caller provides the droppedIDs
    32  // slice to collect dropped mapping identities; the caller must drop the
    33  // references on these identities outside a mm.mappingMu critical section.
    34  // droppedIDs has append-like semantics; multiple calls to functions that drop
    35  // mapping identities within a scope should reuse the same slice.
    36  //
    37  // Preconditions:
    38  //   - mm.mappingMu must be locked for writing.
    39  //   - opts must be valid as defined by the checks in MMap.
    40  func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts, droppedIDs []memmap.MappingIdentity) (vmaIterator, hostarch.AddrRange, []memmap.MappingIdentity, error) {
    41  	if opts.MaxPerms != opts.MaxPerms.Effective() {
    42  		panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms))
    43  	}
    44  
    45  	// Find a usable range.
    46  	addr, err := mm.findAvailableLocked(opts.Length, findAvailableOpts{
    47  		Addr:     opts.Addr,
    48  		Fixed:    opts.Fixed,
    49  		Unmap:    opts.Unmap,
    50  		Map32Bit: opts.Map32Bit,
    51  	})
    52  	if err != nil {
    53  		// Can't force without opts.Unmap and opts.Fixed.
    54  		if opts.Force && opts.Unmap && opts.Fixed {
    55  			addr = opts.Addr
    56  		} else {
    57  			return vmaIterator{}, hostarch.AddrRange{}, droppedIDs, err
    58  		}
    59  	}
    60  	ar, _ := addr.ToRange(opts.Length)
    61  
    62  	// Check against RLIMIT_AS.
    63  	newUsageAS := mm.usageAS + opts.Length
    64  	if opts.Unmap {
    65  		newUsageAS -= uint64(mm.vmas.SpanRange(ar))
    66  	}
    67  	if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
    68  		return vmaIterator{}, hostarch.AddrRange{}, droppedIDs, linuxerr.ENOMEM
    69  	}
    70  
    71  	if opts.MLockMode != memmap.MLockNone {
    72  		// Check against RLIMIT_MEMLOCK.
    73  		if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) {
    74  			mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
    75  			if mlockLimit == 0 {
    76  				return vmaIterator{}, hostarch.AddrRange{}, droppedIDs, linuxerr.EPERM
    77  			}
    78  			newLockedAS := mm.lockedAS + opts.Length
    79  			if opts.Unmap {
    80  				newLockedAS -= mm.mlockedBytesRangeLocked(ar)
    81  			}
    82  			if newLockedAS > mlockLimit {
    83  				return vmaIterator{}, hostarch.AddrRange{}, droppedIDs, linuxerr.EAGAIN
    84  			}
    85  		}
    86  	}
    87  
    88  	// Remove overwritten mappings. This ordering is consistent with Linux:
    89  	// compare Linux's mm/mmap.c:mmap_region() => do_munmap(),
    90  	// file->f_op->mmap().
    91  	var vgap vmaGapIterator
    92  	if opts.Unmap {
    93  		vgap, droppedIDs = mm.unmapLocked(ctx, ar, droppedIDs)
    94  	} else {
    95  		vgap = mm.vmas.FindGap(ar.Start)
    96  	}
    97  
    98  	// Inform the Mappable, if any, of the new mapping.
    99  	if opts.Mappable != nil {
   100  		// The expression for writable is vma.canWriteMappableLocked(), but we
   101  		// don't yet have a vma.
   102  		if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset, !opts.Private && opts.MaxPerms.Write); err != nil {
   103  			return vmaIterator{}, hostarch.AddrRange{}, droppedIDs, err
   104  		}
   105  	}
   106  
   107  	// Take a reference on opts.MappingIdentity before inserting the vma since
   108  	// vma merging can drop the reference.
   109  	if opts.MappingIdentity != nil {
   110  		opts.MappingIdentity.IncRef()
   111  	}
   112  
   113  	// Finally insert the vma.
   114  	v := vma{
   115  		mappable:       opts.Mappable,
   116  		off:            opts.Offset,
   117  		realPerms:      opts.Perms,
   118  		effectivePerms: opts.Perms.Effective(),
   119  		maxPerms:       opts.MaxPerms,
   120  		private:        opts.Private,
   121  		growsDown:      opts.GrowsDown,
   122  		mlockMode:      opts.MLockMode,
   123  		numaPolicy:     linux.MPOL_DEFAULT,
   124  		id:             opts.MappingIdentity,
   125  		hint:           opts.Hint,
   126  	}
   127  
   128  	vseg := mm.vmas.Insert(vgap, ar, v)
   129  	mm.usageAS += opts.Length
   130  	if v.isPrivateDataLocked() {
   131  		mm.dataAS += opts.Length
   132  	}
   133  	if opts.MLockMode != memmap.MLockNone {
   134  		mm.lockedAS += opts.Length
   135  	}
   136  
   137  	return vseg, ar, droppedIDs, nil
   138  }
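        // A minimal caller-side sketch of the droppedIDs contract described above
        // (illustrative; the lock handling mirrors what callers such as MMap do, but
        // is not copied from this file):
        //
        //	var droppedIDs []memmap.MappingIdentity
        //	mm.mappingMu.Lock()
        //	vseg, ar, droppedIDs, err := mm.createVMALocked(ctx, opts, droppedIDs)
        //	// ... handle err, use vseg and ar; later calls in the same scope reuse
        //	// and append to droppedIDs ...
        //	mm.mappingMu.Unlock()
        //	// Drop the collected references only after releasing mappingMu.
        //	for _, id := range droppedIDs {
        //		id.DecRef(ctx)
        //	}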
   139  
   140  type findAvailableOpts struct {
   141  	// These fields are equivalent to those in memmap.MMapOpts, except that:
   142  	//
   143  	//	- Addr must be page-aligned.
   144  	//
   145  	//	- Unmap allows existing guard pages in the returned range.
   146  
   147  	Addr     hostarch.Addr
   148  	Fixed    bool
   149  	Unmap    bool
   150  	Map32Bit bool
   151  }
   152  
   153  // map32Start/End are the bounds to which MAP_32BIT mappings are constrained,
   154  // and are equivalent to Linux's MAP32_BASE and MAP32_MAX respectively.
   155  const (
   156  	map32Start = 0x40000000
   157  	map32End   = 0x80000000
   158  )
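        // Illustrative arithmetic (editorial note): map32Start is 1 GiB (1 << 30) and
        // map32End is 2 GiB (1 << 31), so MAP_32BIT requests are placed within the low
        // [1 GiB, 2 GiB) window of the address space.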
   159  
   160  // findAvailableLocked finds an allocatable range.
   161  //
   162  // Preconditions: mm.mappingMu must be locked.
   163  func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (hostarch.Addr, error) {
   164  	if opts.Fixed {
   165  		opts.Map32Bit = false
   166  	}
   167  	allowedAR := mm.applicationAddrRange()
   168  	if opts.Map32Bit {
   169  		allowedAR = allowedAR.Intersect(hostarch.AddrRange{map32Start, map32End})
   170  	}
   171  
   172  	// Does the provided suggestion work?
   173  	if ar, ok := opts.Addr.ToRange(length); ok {
   174  		if allowedAR.IsSupersetOf(ar) {
   175  			if opts.Unmap {
   176  				return ar.Start, nil
   177  			}
   178  			// Check for the presence of an existing vma or guard page.
   179  			if vgap := mm.vmas.FindGap(ar.Start); vgap.Ok() && vgap.availableRange().IsSupersetOf(ar) {
   180  				return ar.Start, nil
   181  			}
   182  		}
   183  	}
   184  
   185  	// Fixed mappings accept only the requested address.
   186  	if opts.Fixed {
   187  		return 0, linuxerr.ENOMEM
   188  	}
   189  
   190  	// Prefer hugepage alignment if a hugepage or more is requested.
   191  	alignment := uint64(hostarch.PageSize)
   192  	if length >= hostarch.HugePageSize {
   193  		alignment = hostarch.HugePageSize
   194  	}
   195  
   196  	if opts.Map32Bit {
   197  		return mm.findLowestAvailableLocked(length, alignment, allowedAR)
   198  	}
   199  	if mm.layout.DefaultDirection == arch.MmapBottomUp {
   200  		return mm.findLowestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr})
   201  	}
   202  	return mm.findHighestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase})
   203  }
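        // A rough summary of how the cases above combine, assuming the default
        // top-down layout (editorial sketch, not from the original file):
        //
        //	hint honored            => hint fits allowedAR and (Unmap || the gap is free)
        //	Fixed, hint not honored => ENOMEM
        //	Map32Bit                => bottom-up search within [map32Start, map32End)
        //	otherwise               => top-down search below mm.layout.TopDownBase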
   204  
   205  func (mm *MemoryManager) applicationAddrRange() hostarch.AddrRange {
   206  	return hostarch.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr}
   207  }
   208  
   209  // Preconditions: mm.mappingMu must be locked.
   210  func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) {
   211  	for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(hostarch.Addr(length)) {
   212  		if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
   213  			// Can we shift up to match the alignment?
   214  			if offset := uint64(gr.Start) % alignment; offset != 0 {
   215  				if uint64(gr.Length()) >= length+alignment-offset {
   216  					// Yes, we can shift up and still fit.
   217  					return gr.Start + hostarch.Addr(alignment-offset), nil
   218  				}
   219  			}
   220  
   221  			// Either aligned perfectly, or can't align it.
   222  			return gr.Start, nil
   223  		}
   224  	}
   225  	return 0, linuxerr.ENOMEM
   226  }
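        // Worked example of the alignment adjustment above (illustrative values):
        // with gr.Start = 0x2ff000 and alignment = 0x200000 (one 2 MiB hugepage),
        // offset = 0x2ff000 % 0x200000 = 0xff000, so the returned address is shifted
        // up by alignment-offset = 0x101000 to 0x400000, the next 2 MiB boundary,
        // provided the gap still has at least length bytes beyond that point.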
   227  
   228  // Preconditions: mm.mappingMu must be locked.
   229  func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) {
   230  	for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(hostarch.Addr(length)) {
   231  		if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
   232  			// Can we shift down to match the alignment?
   233  			start := gr.End - hostarch.Addr(length)
   234  			if offset := uint64(start) % alignment; offset != 0 {
   235  				if gr.Start <= start-hostarch.Addr(offset) {
   236  					// Yes, we can shift down and still fit.
   237  					return start - hostarch.Addr(offset), nil
   238  				}
   239  			}
   240  
   241  			// Either aligned perfectly, or can't align it.
   242  			return start, nil
   243  		}
   244  	}
   245  	return 0, linuxerr.ENOMEM
   246  }
   247  
   248  // Preconditions: mm.mappingMu must be locked.
   249  func (mm *MemoryManager) mlockedBytesRangeLocked(ar hostarch.AddrRange) uint64 {
   250  	var total uint64
   251  	for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
   252  		if vseg.ValuePtr().mlockMode != memmap.MLockNone {
   253  			total += uint64(vseg.Range().Intersect(ar).Length())
   254  		}
   255  	}
   256  	return total
   257  }
   258  
   259  // getVMAsLocked ensures that vmas exist for all addresses in ar, and support
   260  // access of type (at, ignorePermissions). It returns:
   261  //
   262  //   - An iterator to the vma containing ar.Start. If no vma contains ar.Start,
   263  //     the iterator is unspecified.
   264  //
   265  //   - An iterator to the gap after the last vma containing an address in ar. If
   266  //     vmas exist for no addresses in ar, the iterator is to a gap that begins
   267  //     before ar.Start.
   268  //
   269  //   - An error that is non-nil if vmas exist for only a subset of ar.
   270  //
   271  // Preconditions:
   272  //   - mm.mappingMu must be locked for reading; it may be temporarily unlocked.
   273  //   - ar.Length() != 0.
   274  func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
   275  	if checkInvariants {
   276  		if !ar.WellFormed() || ar.Length() == 0 {
   277  			panic(fmt.Sprintf("invalid ar: %v", ar))
   278  		}
   279  	}
   280  
   281  	// Inline mm.vmas.LowerBoundSegment so that we have the preceding gap if
   282  	// !vbegin.Ok().
   283  	vbegin, vgap := mm.vmas.Find(ar.Start)
   284  	if !vbegin.Ok() {
   285  		vbegin = vgap.NextSegment()
   286  		// vseg.Ok() is checked before entering the following loop.
   287  	} else {
   288  		vgap = vbegin.PrevGap()
   289  	}
   290  
   291  	addr := ar.Start
   292  	vseg := vbegin
   293  	for vseg.Ok() {
   294  		// Loop invariants: vgap = vseg.PrevGap(); addr < vseg.End().
   295  		vma := vseg.ValuePtr()
   296  		if addr < vseg.Start() {
   297  			// TODO(jamieliu): Implement vma.growsDown here.
   298  			return vbegin, vgap, linuxerr.EFAULT
   299  		}
   300  
   301  		perms := vma.effectivePerms
   302  		if ignorePermissions {
   303  			perms = vma.maxPerms
   304  		}
   305  		if !perms.SupersetOf(at) {
   306  			return vbegin, vgap, linuxerr.EPERM
   307  		}
   308  
   309  		addr = vseg.End()
   310  		vgap = vseg.NextGap()
   311  		if addr >= ar.End {
   312  			return vbegin, vgap, nil
   313  		}
   314  		vseg = vgap.NextSegment()
   315  	}
   316  
   317  	// Ran out of vmas before ar.End.
   318  	return vbegin, vgap, linuxerr.EFAULT
   319  }
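        // A minimal sketch of how callers typically consume the returned iterators
        // (illustrative; the exact loop shape varies by caller):
        //
        //	vbegin, vend, err := mm.getVMAsLocked(ctx, ar, at, false /* ignorePermissions */)
        //	if err != nil {
        //		return err
        //	}
        //	// On success, vmas cover all of ar and can be walked contiguously from
        //	// vbegin; vend is the gap following the last of them.
        //	for vseg := vbegin; vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
        //		// ... operate on vseg.Range().Intersect(ar) ...
        //	}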
   320  
   321  // getVecVMAsLocked ensures that vmas exist for all addresses in ars, and
   322  // support access of type (at, ignorePermissions). It returns the subset of
   323  // ars for which vmas exist. If this is not equal to ars, it returns a non-nil
   324  // error explaining why.
   325  //
   326  // Preconditions: mm.mappingMu must be locked for reading; it may be
   327  // temporarily unlocked.
   328  //
   329  // Postconditions: ars is not mutated.
   330  func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool) (hostarch.AddrRangeSeq, error) {
   331  	for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
   332  		ar := arsit.Head()
   333  		if ar.Length() == 0 {
   334  			continue
   335  		}
   336  		if _, vend, err := mm.getVMAsLocked(ctx, ar, at, ignorePermissions); err != nil {
   337  			return truncatedAddrRangeSeq(ars, arsit, vend.Start()), err
   338  		}
   339  	}
   340  	return ars, nil
   341  }
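        // Illustrative behavior (editorial example): if ars consists of [A, B) and
        // [C, D), and vmas cover all of [A, B) but only [C, E) for some E < D, then
        // the returned sequence is {[A, B), [C, E)} along with the error (EFAULT or
        // EPERM) from getVMAsLocked.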
   342  
   343  // guardBytes is the minimum number of unmapped bytes that vma extension will
   344  // preserve between the start of a growsDown vma and the end of its predecessor
   345  // non-growsDown vma.
   346  //
   347  // guardBytes is equivalent to Linux's stack_guard_gap after upstream commit
   348  // 1be7107fbe18 "mm: larger stack guard gap, between vmas".
   349  const guardBytes = 256 * hostarch.PageSize
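        // On platforms with 4 KiB pages this is 256 * 4096 bytes = 1 MiB, matching
        // Linux's default stack_guard_gap of 256 pages.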
   350  
   351  // unmapLocked unmaps all addresses in ar and returns the resulting gap in
   352  // mm.vmas.
   353  //
   354  // Caller provides the droppedIDs slice to collect dropped mapping
   355  // identities. The caller must drop the references on these identities outside a
   356  // mm.mappingMu critical section. droppedIDs has append-like semantics; multiple
   357  // calls to functions that drop mapping identities within a scope should reuse
   358  // the same slice.
   359  //
   360  // Preconditions:
   361  //   - mm.mappingMu must be locked for writing.
   362  //   - ar.Length() != 0.
   363  //   - ar must be page-aligned.
   364  func (mm *MemoryManager) unmapLocked(ctx context.Context, ar hostarch.AddrRange, droppedIDs []memmap.MappingIdentity) (vmaGapIterator, []memmap.MappingIdentity) {
   365  	if checkInvariants {
   366  		if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
   367  			panic(fmt.Sprintf("invalid ar: %v", ar))
   368  		}
   369  	}
   370  
   371  	// AddressSpace mappings and pmas must be invalidated before
   372  	// mm.removeVMAsLocked() => memmap.Mappable.RemoveMapping().
   373  	mm.Invalidate(ar, memmap.InvalidateOpts{InvalidatePrivate: true})
   374  	return mm.removeVMAsLocked(ctx, ar, droppedIDs)
   375  }
   376  
   377  // removeVMAsLocked removes vmas for addresses in ar and returns the
   378  // resulting gap in mm.vmas.
   379  //
   380  // Caller provides the droppedIDs slice to collect dropped mapping
   381  // identities. The caller must drop the references on these identities outside a
   382  // mm.mappingMu critical section. droppedIDs has append-like semantics; multiple
   383  // calls to functions that drop mapping identities within a scope should reuse
   384  // the same slice.
   385  //
   386  // Preconditions:
   387  //   - mm.mappingMu must be locked for writing.
   388  //   - ar.Length() != 0.
   389  //   - ar must be page-aligned.
   390  func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar hostarch.AddrRange, droppedIDs []memmap.MappingIdentity) (vmaGapIterator, []memmap.MappingIdentity) {
   391  	if checkInvariants {
   392  		if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
   393  			panic(fmt.Sprintf("invalid ar: %v", ar))
   394  		}
   395  	}
   396  	vseg, vgap := mm.vmas.Find(ar.Start)
   397  	if vgap.Ok() {
   398  		vseg = vgap.NextSegment()
   399  	}
   400  	for vseg.Ok() && vseg.Start() < ar.End {
   401  		vseg = mm.vmas.Isolate(vseg, ar)
   402  		vmaAR := vseg.Range()
   403  		vma := vseg.ValuePtr()
   404  		if vma.mappable != nil {
   405  			vma.mappable.RemoveMapping(ctx, mm, vmaAR, vma.off, vma.canWriteMappableLocked())
   406  		}
   407  		if vma.id != nil {
   408  			droppedIDs = append(droppedIDs, vma.id)
   409  		}
   410  		mm.usageAS -= uint64(vmaAR.Length())
   411  		if vma.isPrivateDataLocked() {
   412  			mm.dataAS -= uint64(vmaAR.Length())
   413  		}
   414  		if vma.mlockMode != memmap.MLockNone {
   415  			mm.lockedAS -= uint64(vmaAR.Length())
   416  		}
   417  		vgap = mm.vmas.Remove(vseg)
   418  		vseg = vgap.NextSegment()
   419  	}
   420  	return vgap, droppedIDs
   421  }
   422  
   423  // canWriteMappableLocked returns true if it is possible for vma.mappable to be
   424  // written to via this vma, i.e. if it is possible that
   425  // vma.mappable.Translate(at.Write=true) may be called as a result of this vma.
   426  // This includes via I/O with usermem.IOOpts.IgnorePermissions = true, such as
   427  // PTRACE_POKEDATA.
   428  //
   429  // canWriteMappableLocked is equivalent to Linux's VM_SHARED.
   430  //
   431  // Preconditions: mm.mappingMu must be locked.
   432  func (v *vma) canWriteMappableLocked() bool {
   433  	return !v.private && v.maxPerms.Write
   434  }
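        // Illustrative examples (editorial, assuming the usual translation of mmap
        // flags into vma fields): a MAP_SHARED mapping of a file opened O_RDWR has
        // private == false and maxPerms.Write == true, so this returns true even
        // while the mapping is PROT_READ; a MAP_PRIVATE mapping returns false, since
        // writes land in private copies rather than in the mappable.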
   435  
   436  // isPrivateDataLocked identifies data segments: private, writable, and not stack (growsDown).
   437  //
   438  // Preconditions: mm.mappingMu must be locked.
   439  func (v *vma) isPrivateDataLocked() bool {
   440  	return v.realPerms.Write && v.private && !v.growsDown
   441  }
   442  
   443  // vmaSetFunctions implements segment.Functions for vmaSet.
   444  type vmaSetFunctions struct{}
   445  
   446  func (vmaSetFunctions) MinKey() hostarch.Addr {
   447  	return 0
   448  }
   449  
   450  func (vmaSetFunctions) MaxKey() hostarch.Addr {
   451  	return ^hostarch.Addr(0)
   452  }
   453  
   454  func (vmaSetFunctions) ClearValue(vma *vma) {
   455  	vma.mappable = nil
   456  	vma.id = nil
   457  	vma.hint = ""
   458  	atomic.StoreUintptr(&vma.lastFault, 0)
   459  }
   460  
   461  func (vmaSetFunctions) Merge(ar1 hostarch.AddrRange, vma1 vma, ar2 hostarch.AddrRange, vma2 vma) (vma, bool) {
   462  	if vma1.mappable != vma2.mappable ||
   463  		(vma1.mappable != nil && vma1.off+uint64(ar1.Length()) != vma2.off) ||
   464  		vma1.realPerms != vma2.realPerms ||
   465  		vma1.maxPerms != vma2.maxPerms ||
   466  		vma1.private != vma2.private ||
   467  		vma1.growsDown != vma2.growsDown ||
   468  		vma1.mlockMode != vma2.mlockMode ||
   469  		vma1.numaPolicy != vma2.numaPolicy ||
   470  		vma1.numaNodemask != vma2.numaNodemask ||
   471  		vma1.dontfork != vma2.dontfork ||
   472  		vma1.id != vma2.id ||
   473  		vma1.hint != vma2.hint {
   474  		return vma{}, false
   475  	}
   476  
   477  	if vma2.id != nil {
   478  		// This DecRef() will never be the final ref, since vma1 currently
   479  		// holds a ref to the same mapping identity. Thus, we don't need to
   480  		// worry about whether we're in a mm.mappingMu critical section.
   481  		vma2.id.DecRef(context.Background())
   482  	}
   483  	return vma1, true
   484  }
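        // A hedged example of when Merge applies: two adjacent anonymous MAP_PRIVATE
        // vmas with identical permissions and attributes coalesce into one; adjacent
        // file-backed vmas additionally need the same mapping identity and contiguous
        // offsets, i.e. vma1.off + ar1.Length() == vma2.off.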
   485  
   486  func (vmaSetFunctions) Split(ar hostarch.AddrRange, v vma, split hostarch.Addr) (vma, vma) {
   487  	v2 := v
   488  	if v2.mappable != nil {
   489  		v2.off += uint64(split - ar.Start)
   490  	}
   491  	if v2.id != nil {
   492  		v2.id.IncRef()
   493  	}
   494  	return v, v2
   495  }
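        // Worked example (illustrative values): splitting a file-backed vma covering
        // [0x400000, 0x408000) with off = 0x10000 at split = 0x404000 leaves the left
        // half with off = 0x10000 and gives the right half off = 0x10000 + (0x404000 -
        // 0x400000) = 0x14000, plus an extra reference on the mapping identity.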
   496  
   497  // Preconditions:
   498  //   - vseg.ValuePtr().mappable != nil.
   499  //   - vseg.Range().Contains(addr).
   500  func (vseg vmaIterator) mappableOffsetAt(addr hostarch.Addr) uint64 {
   501  	if checkInvariants {
   502  		if !vseg.Ok() {
   503  			panic("terminal vma iterator")
   504  		}
   505  		if vseg.ValuePtr().mappable == nil {
   506  			panic("Mappable offset is meaningless for anonymous vma")
   507  		}
   508  		if !vseg.Range().Contains(addr) {
   509  			panic(fmt.Sprintf("addr %v out of bounds %v", addr, vseg.Range()))
   510  		}
   511  	}
   512  
   513  	vma := vseg.ValuePtr()
   514  	vstart := vseg.Start()
   515  	return vma.off + uint64(addr-vstart)
   516  }
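        // Worked example (illustrative values): for a vma starting at 0x400000 with
        // off = 0x3000, mappableOffsetAt(0x401000) returns 0x3000 + (0x401000 -
        // 0x400000) = 0x4000.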
   517  
   518  // Preconditions: vseg.ValuePtr().mappable != nil.
   519  func (vseg vmaIterator) mappableRange() memmap.MappableRange {
   520  	return vseg.mappableRangeOf(vseg.Range())
   521  }
   522  
   523  // Preconditions:
   524  //   - vseg.ValuePtr().mappable != nil.
   525  //   - vseg.Range().IsSupersetOf(ar).
   526  //   - ar.Length() != 0.
   527  func (vseg vmaIterator) mappableRangeOf(ar hostarch.AddrRange) memmap.MappableRange {
   528  	if checkInvariants {
   529  		if !vseg.Ok() {
   530  			panic("terminal vma iterator")
   531  		}
   532  		if vseg.ValuePtr().mappable == nil {
   533  			panic("MappableRange is meaningless for anonymous vma")
   534  		}
   535  		if !ar.WellFormed() || ar.Length() == 0 {
   536  			panic(fmt.Sprintf("invalid ar: %v", ar))
   537  		}
   538  		if !vseg.Range().IsSupersetOf(ar) {
   539  			panic(fmt.Sprintf("ar %v out of bounds %v", ar, vseg.Range()))
   540  		}
   541  	}
   542  
   543  	vma := vseg.ValuePtr()
   544  	vstart := vseg.Start()
   545  	return memmap.MappableRange{vma.off + uint64(ar.Start-vstart), vma.off + uint64(ar.End-vstart)}
   546  }
   547  
   548  // Preconditions:
   549  //   - vseg.ValuePtr().mappable != nil.
   550  //   - vseg.mappableRange().IsSupersetOf(mr).
   551  //   - mr.Length() != 0.
   552  func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) hostarch.AddrRange {
   553  	if checkInvariants {
   554  		if !vseg.Ok() {
   555  			panic("terminal vma iterator")
   556  		}
   557  		if vseg.ValuePtr().mappable == nil {
   558  			panic("MappableRange is meaningless for anonymous vma")
   559  		}
   560  		if !mr.WellFormed() || mr.Length() == 0 {
   561  			panic(fmt.Sprintf("invalid mr: %v", mr))
   562  		}
   563  		if !vseg.mappableRange().IsSupersetOf(mr) {
   564  			panic(fmt.Sprintf("mr %v out of bounds %v", mr, vseg.mappableRange()))
   565  		}
   566  	}
   567  
   568  	vma := vseg.ValuePtr()
   569  	vstart := vseg.Start()
   570  	return hostarch.AddrRange{vstart + hostarch.Addr(mr.Start-vma.off), vstart + hostarch.Addr(mr.End-vma.off)}
   571  }
   572  
   573  // seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by
   574  // scanning linearly forward from vseg.
   575  //
   576  // Preconditions:
   577  //   - mm.mappingMu must be locked.
   578  //   - addr >= vseg.Start().
   579  func (vseg vmaIterator) seekNextLowerBound(addr hostarch.Addr) vmaIterator {
   580  	if checkInvariants {
   581  		if !vseg.Ok() {
   582  			panic("terminal vma iterator")
   583  		}
   584  		if addr < vseg.Start() {
   585  			panic(fmt.Sprintf("can't seek forward to %#x from %#x", addr, vseg.Start()))
   586  		}
   587  	}
   588  	for vseg.Ok() && addr >= vseg.End() {
   589  		vseg = vseg.NextSegment()
   590  	}
   591  	return vseg
   592  }
   593  
   594  // availableRange returns the subset of vgap.Range() in which new vmas may be
   595  // created without MMapOpts.Unmap == true.
   596  func (vgap vmaGapIterator) availableRange() hostarch.AddrRange {
   597  	ar := vgap.Range()
   598  	next := vgap.NextSegment()
   599  	if !next.Ok() || !next.ValuePtr().growsDown {
   600  		return ar
   601  	}
   602  	// Exclude guard pages.
   603  	if ar.Length() < guardBytes {
   604  		return hostarch.AddrRange{ar.Start, ar.Start}
   605  	}
   606  	ar.End -= guardBytes
   607  	return ar
   608  }
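        // Worked example (illustrative values, assuming 4 KiB pages so that guardBytes
        // is 0x100000): for a gap spanning [0x100000, 0x600000) whose next vma is
        // growsDown, availableRange returns [0x100000, 0x500000), reserving the top
        // 1 MiB of the gap as guard pages; a gap shorter than 1 MiB yields an empty
        // range.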