github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/mm/address_space.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"fmt"

	"github.com/metacubex/gvisor/pkg/context"
	"github.com/metacubex/gvisor/pkg/hostarch"
	"github.com/metacubex/gvisor/pkg/sentry/memmap"
	"github.com/metacubex/gvisor/pkg/sentry/platform"
)

// AddressSpace returns the platform.AddressSpace bound to mm.
//
// Preconditions: The caller must have called mm.Activate().
func (mm *MemoryManager) AddressSpace() platform.AddressSpace {
	if mm.active.Load() == 0 {
		panic("trying to use inactive address space?")
	}
	return mm.as
}

// Activate ensures this MemoryManager has a platform.AddressSpace.
//
// The caller must not hold any locks when calling Activate.
//
// When a task no longer needs this MemoryManager, it should call Deactivate
// to release the reference.
func (mm *MemoryManager) Activate(ctx context.Context) error {
	// Fast path: the MemoryManager already has an active
	// platform.AddressSpace, and we just need to indicate that we need it too.
	for {
		active := mm.active.Load()
		if active == 0 {
			// Fall back to the slow path.
			break
		}
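		// Note: the CAS loop (rather than an unconditional Add) guarantees
		// that the fast path never moves mm.active from 0 to 1; that
		// transition may require allocating an AddressSpace, so it is left
		// to the slow path under activeMu.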
		if mm.active.CompareAndSwap(active, active+1) {
			return nil
		}
	}

	for {
		// Slow path: may need to synchronize with other goroutines changing
		// mm.active to or from zero.
		mm.activeMu.Lock()
		// Inline Unlock instead of using a defer for performance, since this
		// method is commonly on the hot path.
		// Check if we raced with another goroutine performing activation.
		if mm.active.Load() > 0 {
			// This can't race; Deactivate can't decrease mm.active from 1 to 0
			// without holding activeMu.
			mm.active.Add(1)
			mm.activeMu.Unlock()
			return nil
		}

		// Do we still have an AddressSpace? If so, we never unmapped it. This
		// can only be the case if !mm.p.CooperativelySchedulesAddressSpace().
		if mm.as != nil {
			mm.active.Store(1)
			mm.activeMu.Unlock()
			return nil
		}

		// Get a new address space. If requested, we must force unmapping by
		// passing nil to NewAddressSpace (the nil interface value, not a
		// typed nil).
		mappingsID := (any)(mm)
		if mm.unmapAllOnActivate {
			mappingsID = nil
		}
		as, c, err := mm.p.NewAddressSpace(mappingsID)
		if err != nil {
			mm.activeMu.Unlock()
			return err
		}
		if as == nil {
			// AddressSpace is unavailable; we must wait.
			//
			// activeMu must not be held while waiting, as the user of the address
			// space we are waiting on may attempt to take activeMu.
			mm.activeMu.Unlock()

			sleep := mm.p.CooperativelySchedulesAddressSpace() && mm.sleepForActivation
			if sleep {
				// Mark this task as sleeping while waiting for the address
				// space, so that the watchdog does not report it as stuck.
				ctx.UninterruptibleSleepStart(false)
			}
			<-c
			if sleep {
				ctx.UninterruptibleSleepFinish(false)
			}
			continue
		}

		// We could eagerly restore all mappings at this point, but instead we
		// let them fault in on demand.
		mm.as = as

		// Unmapping is done, if necessary.
		mm.unmapAllOnActivate = false

		// Now that mm.as has been assigned, we can set mm.active to a
		// non-zero value to enable the fast path.
		mm.active.Store(1)

		mm.activeMu.Unlock()
		return nil
	}
}

// Deactivate releases a reference to the MemoryManager previously acquired
// by Activate.
func (mm *MemoryManager) Deactivate() {
	// Fast path: this is not the last goroutine to deactivate the
	// MemoryManager.
	for {
		active := mm.active.Load()
		if active == 1 {
			// Fall back to the slow path.
			break
		}
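		// As the mirror of Activate's fast path, the CAS loop below never
		// moves mm.active from 1 to 0: that transition may release the
		// AddressSpace, so it must happen under activeMu.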
		if mm.active.CompareAndSwap(active, active-1) {
			return
		}
	}

	mm.activeMu.Lock()
	// As in Activate, inline Unlock instead of using a defer for performance.

	// Still active?
	if mm.active.Add(-1) > 0 {
		mm.activeMu.Unlock()
		return
	}

	// Can we hold on to the address space?
	if !mm.p.CooperativelySchedulesAddressSpace() {
		mm.activeMu.Unlock()
		return
	}

	// Release the address space.
	mm.as.Release()

	// Lost it.
	mm.as = nil
	mm.activeMu.Unlock()
}
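
// withActiveAddressSpace is an illustrative sketch, not part of the original
// file: it shows the expected pairing of Activate and Deactivate around uses
// of AddressSpace, as described in the doc comments above. The helper name
// and callback signature are hypothetical.
func withActiveAddressSpace(ctx context.Context, mm *MemoryManager, f func(platform.AddressSpace)) error {
	if err := mm.Activate(ctx); err != nil {
		return err
	}
	// Drop the activation reference even if f panics.
	defer mm.Deactivate()
	f(mm.AddressSpace())
	return nil
}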

// mapASLocked maps addresses in ar into mm.as.
//
// Preconditions:
//   - mm.activeMu must be locked.
//   - mm.as != nil.
//   - ar.Length() != 0.
//   - ar must be page-aligned.
//   - pseg == mm.pmas.LowerBoundSegment(ar.Start).
func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, platformEffect memmap.MMapPlatformEffect) error {
	// By default, map entire pmas at a time, under the assumption that there
	// is no cost to mapping more of a pma than necessary.
	mapAR := hostarch.AddrRange{0, ^hostarch.Addr(hostarch.PageSize - 1)}
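	// (^hostarch.Addr(hostarch.PageSize-1) is the highest page-aligned
	// address, so mapAR initially covers the entire address space.)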
	if platformEffect != memmap.PlatformEffectDefault {
		// When explicitly committing, only map ar, since overmapping may incur
		// unexpected resource usage. When explicitly populating, do the same
		// since an underlying device file may be sensitive to the mapped
		// range.
		mapAR = ar
	} else if mapUnit := mm.p.MapUnit(); mapUnit != 0 {
		// Limit the range we map to ar, aligned to mapUnit.
		mapMask := hostarch.Addr(mapUnit - 1)
		mapAR.Start = ar.Start &^ mapMask
		// If rounding ar.End up overflows, just keep the existing mapAR.End.
		if end := (ar.End + mapMask) &^ mapMask; end >= ar.End {
			mapAR.End = end
		}
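		// For example (hypothetical numbers): with mapUnit = 2 MiB and
		// ar = [0x201000, 0x203000), mapMask is 0x1fffff, so mapAR becomes
		// [0x200000, 0x400000).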
	}
	if checkInvariants {
		if !mapAR.IsSupersetOf(ar) {
			panic(fmt.Sprintf("mapAR %#v is not a superset of ar %#v", mapAR, ar))
		}
	}

	// Since this checks ar.End and not mapAR.End, we will never map a pma that
	// is not required.
	for pseg.Ok() && pseg.Start() < ar.End {
		pma := pseg.ValuePtr()
		pmaAR := pseg.Range()
		pmaMapAR := pmaAR.Intersect(mapAR)
		perms := pma.effectivePerms
		if pma.needCOW {
			perms.Write = false
		}
		if perms.Any() { // MapFile precondition: at least one access type is set.
			if err := mm.as.MapFile(pmaMapAR.Start, pma.file, pseg.fileRangeOf(pmaMapAR), perms, platformEffect == memmap.PlatformEffectCommit); err != nil {
				return err
			}
		}
		pseg = pseg.NextSegment()
	}
	return nil
}

// unmapASLocked removes all AddressSpace mappings for addresses in ar.
//
// Preconditions: mm.activeMu must be locked.
func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) {
	if mm.as == nil {
		// No AddressSpace? Force all mappings to be unmapped on the next
		// Activate.
		mm.unmapAllOnActivate = true
		return
	}

	// unmapASLocked doesn't require vmas or pmas to exist for ar, so it can be
	// passed ranges that include addresses that can't be mapped by the
	// application.
	ar = ar.Intersect(mm.applicationAddrRange())

	// Note that this AddressSpace may or may not be active. If the
	// platform does not require cooperative sharing of AddressSpaces, they
	// are retained between Deactivate/Activate calls. Despite not being
	// active, it is still valid to perform operations on these address
	// spaces.
	mm.as.Unmap(ar.Start, uint64(ar.Length()))
}