github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sentry/mm/address_space.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"fmt"

	"github.com/nicocha30/gvisor-ligolo/pkg/context"
	"github.com/nicocha30/gvisor-ligolo/pkg/hostarch"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/platform"
)

// AddressSpace returns the platform.AddressSpace bound to mm.
//
// Preconditions: The caller must have called mm.Activate().
func (mm *MemoryManager) AddressSpace() platform.AddressSpace {
	if mm.active.Load() == 0 {
		panic("trying to use inactive address space?")
	}
	return mm.as
}

// Activate ensures this MemoryManager has a platform.AddressSpace.
//
// The caller must not hold any locks when calling Activate.
//
// When this MemoryManager is no longer needed by a task, it should call
// Deactivate to release the reference.
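//
// As a sketch (not a call site taken from this file), a task that needs the
// AddressSpace might pair the two calls:
//
//	if err := mm.Activate(ctx); err != nil {
//		return err
//	}
//	defer mm.Deactivate()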
func (mm *MemoryManager) Activate(ctx context.Context) error {
	// Fast path: the MemoryManager already has an active
	// platform.AddressSpace, and we just need to indicate that we need it too.
	for {
		active := mm.active.Load()
		if active == 0 {
			// Fall back to the slow path.
			break
		}
		if mm.active.CompareAndSwap(active, active+1) {
			return nil
		}
	}

	for {
		// Slow path: may need to synchronize with other goroutines changing
		// mm.active to or from zero.
		mm.activeMu.Lock()
		// Inline Unlock instead of using a defer for performance, since this
		// method is commonly on the hot path.

		// Check if we raced with another goroutine performing activation.
		if mm.active.Load() > 0 {
			// This can't race; Deactivate can't decrease mm.active from 1 to 0
			// without holding activeMu.
			mm.active.Add(1)
			mm.activeMu.Unlock()
			return nil
		}

		// Do we still have an AddressSpace? If so, then we never unmapped it.
		// This can only be the case if !mm.p.CooperativelySchedulesAddressSpace().
		if mm.as != nil {
			mm.active.Store(1)
			mm.activeMu.Unlock()
			return nil
		}

		// Get a new address space. We must force unmapping by passing nil to
		// NewAddressSpace if requested. (As in the nil interface object, not a
		// typed nil.)
		mappingsID := (any)(mm)
		if mm.unmapAllOnActivate {
			mappingsID = nil
		}
		as, c, err := mm.p.NewAddressSpace(mappingsID)
		if err != nil {
			mm.activeMu.Unlock()
			return err
		}
		if as == nil {
			// The AddressSpace is unavailable; we must wait.
			//
			// activeMu must not be held while waiting, as the user of the address
			// space we are waiting on may attempt to take activeMu.
			mm.activeMu.Unlock()

			sleep := mm.p.CooperativelySchedulesAddressSpace() && mm.sleepForActivation
			if sleep {
				// Mark this task as sleeping while waiting for the address space,
				// so that the watchdog does not report it as a stuck task.
				ctx.UninterruptibleSleepStart(false)
			}
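			// Block until the platform signals that an AddressSpace may have
			// become available, then retry from the top of the loop.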
			<-c
			if sleep {
				ctx.UninterruptibleSleepFinish(false)
			}
			continue
		}

		// We could restore all mappings at this point, but instead we just let
		// them fault back in.
		mm.as = as

		// Unmapping is done, if necessary.
		mm.unmapAllOnActivate = false

		// Now that mm.as has been assigned, we can set mm.active to a non-zero
		// value to enable the fast path.
		mm.active.Store(1)

		mm.activeMu.Unlock()
		return nil
	}
}

// Deactivate releases a reference to the MemoryManager.
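//
// If this drops the last active reference and the platform cooperatively
// schedules AddressSpaces, the AddressSpace itself is released.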
func (mm *MemoryManager) Deactivate() {
	// Fast path: this is not the last goroutine to deactivate the
	// MemoryManager.
	for {
		active := mm.active.Load()
		if active == 1 {
			// Fall back to the slow path.
			break
		}
		if mm.active.CompareAndSwap(active, active-1) {
			return
		}
	}

	mm.activeMu.Lock()
	// Same as Activate: inline Unlock instead of using a defer, since this
	// method is commonly on the hot path.

	// Still active?
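	// Dropping mm.active from 1 to 0 can only happen here, with activeMu held,
	// which is what Activate's slow path relies on.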
	if mm.active.Add(-1) > 0 {
		mm.activeMu.Unlock()
		return
	}

	// Can we hold on to the address space?
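	// If the platform does not require cooperative sharing, we keep mm.as so
	// that the next Activate can reuse it (see the note in unmapASLocked about
	// operating on inactive AddressSpaces).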
	if !mm.p.CooperativelySchedulesAddressSpace() {
		mm.activeMu.Unlock()
		return
	}

	// Release the address space.
	mm.as.Release()

	// Lost it.
	mm.as = nil
	mm.activeMu.Unlock()
}

// mapASLocked maps addresses in ar into mm.as. If precommit is true, mappings
// for all addresses in ar should be precommitted.
//
// Preconditions:
//   - mm.activeMu must be locked.
//   - mm.as != nil.
//   - ar.Length() != 0.
//   - ar must be page-aligned.
//   - pseg == mm.pmas.LowerBoundSegment(ar.Start).
func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, precommit bool) error {
	// By default, map entire pmas at a time, under the assumption that there
	// is no cost to mapping more of a pma than necessary.
	mapAR := hostarch.AddrRange{0, ^hostarch.Addr(hostarch.PageSize - 1)}
	if precommit {
		// When explicitly precommitting, only map ar, since overmapping may
		// incur unexpected resource usage.
		mapAR = ar
	} else if mapUnit := mm.p.MapUnit(); mapUnit != 0 {
		// Limit the range we map to ar, aligned to mapUnit.
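		// For example, with a 2 MB mapUnit, ar = [0x201000, 0x202000) expands
		// to mapAR = [0x200000, 0x400000).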
		mapMask := hostarch.Addr(mapUnit - 1)
		mapAR.Start = ar.Start &^ mapMask
		// If rounding ar.End up overflows, just keep the existing mapAR.End.
		if end := (ar.End + mapMask) &^ mapMask; end >= ar.End {
			mapAR.End = end
		}
	}
	if checkInvariants {
		if !mapAR.IsSupersetOf(ar) {
			panic(fmt.Sprintf("mapAR %#v is not a superset of ar %#v", mapAR, ar))
		}
	}

	// Since this checks ar.End and not mapAR.End, we will never map a pma that
	// is not required.
	for pseg.Ok() && pseg.Start() < ar.End {
		pma := pseg.ValuePtr()
		pmaAR := pseg.Range()
		pmaMapAR := pmaAR.Intersect(mapAR)
		perms := pma.effectivePerms
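		// Copy-on-write pmas are mapped read-only so that writes fault and the
		// copy can be performed.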
		if pma.needCOW {
			perms.Write = false
		}
		if perms.Any() { // MapFile precondition
			if err := mm.as.MapFile(pmaMapAR.Start, pma.file, pseg.fileRangeOf(pmaMapAR), perms, precommit); err != nil {
				return err
			}
		}
		pseg = pseg.NextSegment()
	}
	return nil
}

// unmapASLocked removes all AddressSpace mappings for addresses in ar.
//
// Preconditions: mm.activeMu must be locked.
func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) {
	if mm.as == nil {
		// No AddressSpace? Force all mappings to be unmapped on the next
		// Activate.
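		// (Activate honors this by passing a nil mappingsID to NewAddressSpace,
		// which forces unmapping.)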
		mm.unmapAllOnActivate = true
		return
	}

	// unmapASLocked doesn't require vmas or pmas to exist for ar, so it can be
	// passed ranges that include addresses that can't be mapped by the
	// application.
	ar = ar.Intersect(mm.applicationAddrRange())

	// Note that this AddressSpace may or may not be active. If the
	// platform does not require cooperative sharing of AddressSpaces, they
	// are retained between Deactivate/Activate calls. Despite not being
	// active, it is still valid to perform operations on these address
	// spaces.
	mm.as.Unmap(ar.Start, uint64(ar.Length()))
}