github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/mm/address_space.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"fmt"
	"sync/atomic"

	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/hostarch"
	"github.com/SagerNet/gvisor/pkg/sentry/platform"
)

// AddressSpace returns the platform.AddressSpace bound to mm.
//
// Preconditions: The caller must have called mm.Activate().
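//
// The returned AddressSpace is only guaranteed to remain valid while the
// reference obtained from the corresponding Activate call is held; after the
// matching Deactivate, the AddressSpace may be released.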
func (mm *MemoryManager) AddressSpace() platform.AddressSpace {
	if atomic.LoadInt32(&mm.active) == 0 {
		panic("trying to use inactive address space?")
	}
	return mm.as
}

// Activate ensures this MemoryManager has a platform.AddressSpace.
//
// The caller must not hold any locks when calling Activate.
//
// When this MemoryManager is no longer needed by a task, it should call
// Deactivate to release the reference.
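//
// A minimal usage sketch (hypothetical caller; actual callers live in the
// task execution path): each successful Activate must be paired with exactly
// one Deactivate, and AddressSpace may only be called in between:
//
//	if err := mm.Activate(ctx); err != nil {
//		return err
//	}
//	defer mm.Deactivate()
//	as := mm.AddressSpace() // safe while the reference from Activate is held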
func (mm *MemoryManager) Activate(ctx context.Context) error {
	// Fast path: the MemoryManager already has an active
	// platform.AddressSpace, and we just need to indicate that we need it too.
	for {
		active := atomic.LoadInt32(&mm.active)
		if active == 0 {
			// Fall back to the slow path.
			break
		}
		if atomic.CompareAndSwapInt32(&mm.active, active, active+1) {
			return nil
		}
	}

	for {
		// Slow path: we may need to synchronize with other goroutines changing
		// mm.active to or from zero.
		mm.activeMu.Lock()
		// Inline Unlock instead of using a defer, for performance: this
		// method is commonly on the hot path.

		// Check if we raced with another goroutine performing activation.
		if atomic.LoadInt32(&mm.active) > 0 {
			// This can't race; Deactivate can't decrease mm.active from 1 to 0
			// without holding activeMu.
			atomic.AddInt32(&mm.active, 1)
			mm.activeMu.Unlock()
			return nil
		}

		// Do we still have an AddressSpace? If so, then we never unmapped it.
		// This can only be the case if !mm.p.CooperativelySchedulesAddressSpace().
		if mm.as != nil {
			atomic.StoreInt32(&mm.active, 1)
			mm.activeMu.Unlock()
			return nil
		}

		// Get a new address space. We must force unmapping by passing nil to
		// NewAddressSpace if requested. (As in the nil interface object, not a
		// typed nil.)
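		// (In Go, an interface holding a nil *MemoryManager is itself non-nil,
		// so a typed nil would not read as a nil mappings ID; hence the explicit
		// assignment of the untyped nil below when unmapping is required.)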
		mappingsID := (interface{})(mm)
		if mm.unmapAllOnActivate {
			mappingsID = nil
		}
		as, c, err := mm.p.NewAddressSpace(mappingsID)
		if err != nil {
			mm.activeMu.Unlock()
			return err
		}
		if as == nil {
			// The AddressSpace is currently unavailable; we must wait.
			//
			// activeMu must not be held while waiting, as the user of the address
			// space we are waiting on may attempt to take activeMu.
			mm.activeMu.Unlock()

			sleep := mm.p.CooperativelySchedulesAddressSpace() && mm.sleepForActivation
			if sleep {
				// Mark this task as sleeping while it waits for the address space,
				// to prevent the watchdog from reporting it as a stuck task.
				ctx.UninterruptibleSleepStart(false)
			}
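			// Wait on c, which the platform uses to signal that an AddressSpace
			// may have become available, then retry the slow path from the top.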
			<-c
			if sleep {
				ctx.UninterruptibleSleepFinish(false)
			}
			continue
		}

		// We could eagerly restore all mappings at this point, but instead we
		// just let them fault in on demand.
		mm.as = as

		// Any unmapping that was requested has now been handled.
		mm.unmapAllOnActivate = false

		// Now that mm.as has been assigned, we can set mm.active to a non-zero
		// value to enable the fast path.
		atomic.StoreInt32(&mm.active, 1)

		mm.activeMu.Unlock()
		return nil
	}
}

// Deactivate releases a reference to the MemoryManager.
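//
// Preconditions: The caller must have previously made a matching, successful
// call to mm.Activate().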
func (mm *MemoryManager) Deactivate() {
	// Fast path: this is not the last goroutine to deactivate the
	// MemoryManager.
	for {
		active := atomic.LoadInt32(&mm.active)
		if active == 1 {
			// Fall back to the slow path.
			break
		}
		if atomic.CompareAndSwapInt32(&mm.active, active, active-1) {
			return
		}
	}

	mm.activeMu.Lock()
	// Same approach as Activate's slow path (including inlining Unlock
	// instead of deferring it).

	// Still active?
	if atomic.AddInt32(&mm.active, -1) > 0 {
		mm.activeMu.Unlock()
		return
	}

	// Can we hold on to the address space?
	if !mm.p.CooperativelySchedulesAddressSpace() {
		mm.activeMu.Unlock()
		return
	}

	// Release the address space.
	mm.as.Release()

	// Lost it.
	mm.as = nil
	mm.activeMu.Unlock()
}

// mapASLocked maps addresses in ar into mm.as. If precommit is true, mappings
// for all addresses in ar should be precommitted.
//
// Preconditions:
// * mm.activeMu must be locked.
// * mm.as != nil.
// * ar.Length() != 0.
// * ar must be page-aligned.
// * pseg == mm.pmas.LowerBoundSegment(ar.Start).
func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, precommit bool) error {
	// By default, map entire pmas at a time, under the assumption that there
	// is no cost to mapping more of a pma than necessary.
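	// (^hostarch.Addr(hostarch.PageSize-1) is the highest page-aligned address,
	// so mapAR initially spans essentially the whole page-aligned address space
	// and is only narrowed below.)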
	mapAR := hostarch.AddrRange{0, ^hostarch.Addr(hostarch.PageSize - 1)}
	if precommit {
		// When explicitly precommitting, only map ar, since overmapping may
		// incur unexpected resource usage.
		mapAR = ar
	} else if mapUnit := mm.p.MapUnit(); mapUnit != 0 {
		// Limit the range we map to ar, aligned to mapUnit.
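		//
		// For example (hypothetical values): with mapUnit == 2 MiB and
		// ar == [0x201000, 0x203000), mapAR becomes [0x200000, 0x400000).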
		mapMask := hostarch.Addr(mapUnit - 1)
		mapAR.Start = ar.Start &^ mapMask
		// If rounding ar.End up overflows, just keep the existing mapAR.End.
		if end := (ar.End + mapMask) &^ mapMask; end >= ar.End {
			mapAR.End = end
		}
	}
	if checkInvariants {
		if !mapAR.IsSupersetOf(ar) {
			panic(fmt.Sprintf("mapAR %#v is not a superset of ar %#v", mapAR, ar))
		}
	}

	// Since this checks ar.End and not mapAR.End, we will never map a pma that
	// is not required.
	for pseg.Ok() && pseg.Start() < ar.End {
		pma := pseg.ValuePtr()
		pmaAR := pseg.Range()
		pmaMapAR := pmaAR.Intersect(mapAR)
		perms := pma.effectivePerms
		if pma.needCOW {
			perms.Write = false
		}
		if perms.Any() { // MapFile precondition
			if err := mm.as.MapFile(pmaMapAR.Start, pma.file, pseg.fileRangeOf(pmaMapAR), perms, precommit); err != nil {
				return err
			}
		}
		pseg = pseg.NextSegment()
	}
	return nil
}

// unmapASLocked removes all AddressSpace mappings for addresses in ar.
//
// Preconditions: mm.activeMu must be locked.
func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) {
	if mm.as == nil {
		// No AddressSpace? Force all mappings to be unmapped on the next
		// Activate.
		mm.unmapAllOnActivate = true
		return
	}

	// unmapASLocked doesn't require vmas or pmas to exist for ar, so it can be
	// passed ranges that include addresses that can't be mapped by the
	// application.
	ar = ar.Intersect(mm.applicationAddrRange())

	// Note that this AddressSpace may or may not be active. If the
	// platform does not require cooperative sharing of AddressSpaces, they
	// are retained between Deactivate/Activate calls. Despite not being
	// active, it is still valid to perform operations on these address
	// spaces.
	mm.as.Unmap(ar.Start, uint64(ar.Length()))
}