github.com/ttpreport/gvisor-ligolo@v0.0.0-20240123134145-a858404967ba/pkg/ring0/pagetables/walker_empty_arm64.go

//go:build arm64
// +build arm64

package pagetables

// iterateRangeCanonical walks a canonical range.
//
//go:nosplit
func (w *emptyWalker) iterateRangeCanonical(start, end uintptr) bool {
	// Addresses at or above upperBottom live in the upper half, which has
	// its own root table on arm64.
	pgdEntryIndex := w.pageTables.root
	if start >= upperBottom {
		pgdEntryIndex = w.pageTables.archPageTables.root
	}

	for pgdIndex := uint16((start & pgdMask) >> pgdShift); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
		var (
			pgdEntry   = &pgdEntryIndex[pgdIndex]
			pudEntries *PTEs
		)
		if !pgdEntry.Valid() {
			if !w.visitor.requiresAlloc() {
				// Skip over this non-present entry.
				start = emptynext(start, pgdSize)
				continue
			}

			// Allocate and install a fresh pud page.
			pudEntries = w.pageTables.Allocator.NewPTEs()
			pgdEntry.setPageTable(w.pageTables, pudEntries)
		} else {
			pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
		}

		// clearPUDEntries counts pud entries left (or made) invalid so the
		// pud page can be freed below if it ends up entirely empty.
		clearPUDEntries := uint16(0)

		for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
			var (
				pudEntry   = &pudEntries[pudIndex]
				pmdEntries *PTEs
			)
			if !pudEntry.Valid() {
				if !w.visitor.requiresAlloc() {
					// Skip over this non-present entry.
					clearPUDEntries++
					start = emptynext(start, pudSize)
					continue
				}

				// This level supports sect (block) mappings spanning an
				// entire pud entry. If the remaining range covers such an
				// aligned block, offer it to the visitor directly and skip
				// allocating a pmd page.
				if start&(pudSize-1) == 0 && end-start >= pudSize {
					pudEntry.SetSect()
					if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
						return false
					}
					if pudEntry.Valid() {
						start = emptynext(start, pudSize)
						continue
					}
				}

				// The sect mapping was not installed; allocate and install
				// a pmd page instead.
				pmdEntries = w.pageTables.Allocator.NewPTEs()
				pudEntry.setPageTable(w.pageTables, pmdEntries)

			} else if pudEntry.IsSect() {
				// An existing sect mapping: split it if the visitor
				// requires splitting and the range covers it only
				// partially.
				if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < emptynext(start, pudSize)) {
					// Install pmd-level sect entries that reproduce the
					// original mapping piecewise.
					pmdEntries = w.pageTables.Allocator.NewPTEs()
					for index := uint16(0); index < entriesPerPage; index++ {
						pmdEntries[index].SetSect()
						pmdEntries[index].Set(
							pudEntry.Address()+(pmdSize*uintptr(index)),
							pudEntry.Opts())
					}
					pudEntry.setPageTable(w.pageTables, pmdEntries)
				} else {
					// Visit the sect mapping directly.
					if !w.visitor.visit(uintptr(start), pudEntry, pudSize-1) {
						return false
					}

					// It may have been cleared by the visitor.
					if !pudEntry.Valid() {
						clearPUDEntries++
					}

					start = emptynext(start, pudSize)
					continue
				}

			} else {
				pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
			}

			// clearPMDEntries counts pmd entries left (or made) invalid so
			// the pmd page can be freed below if it ends up entirely empty.
			clearPMDEntries := uint16(0)

			for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
				var (
					pmdEntry   = &pmdEntries[pmdIndex]
					pteEntries *PTEs
				)
				if !pmdEntry.Valid() {
					if !w.visitor.requiresAlloc() {
						// Skip over this non-present entry.
						clearPMDEntries++
						start = emptynext(start, pmdSize)
						continue
					}

					// This level also supports sect mappings, spanning an
					// entire pmd entry. As above, offer one to the visitor
					// if the remaining range covers an aligned block.
					if start&(pmdSize-1) == 0 && end-start >= pmdSize {
						pmdEntry.SetSect()
						if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
							return false
						}
						if pmdEntry.Valid() {
							start = emptynext(start, pmdSize)
							continue
						}
					}

					// The sect mapping was not installed; allocate and
					// install a pte page instead.
					pteEntries = w.pageTables.Allocator.NewPTEs()
					pmdEntry.setPageTable(w.pageTables, pteEntries)

				} else if pmdEntry.IsSect() {
					// An existing sect mapping: split it if the visitor
					// requires splitting and the range covers it only
					// partially.
					if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < emptynext(start, pmdSize)) {
						// Install pte entries that reproduce the original
						// mapping piecewise.
						pteEntries = w.pageTables.Allocator.NewPTEs()
						for index := uint16(0); index < entriesPerPage; index++ {
							pteEntries[index].Set(
								pmdEntry.Address()+(pteSize*uintptr(index)),
								pmdEntry.Opts())
						}
						pmdEntry.setPageTable(w.pageTables, pteEntries)
					} else {
						// Visit the sect mapping directly.
						if !w.visitor.visit(uintptr(start), pmdEntry, pmdSize-1) {
							return false
						}

						// It may have been cleared by the visitor.
						if !pmdEntry.Valid() {
							clearPMDEntries++
						}

						start = emptynext(start, pmdSize)
						continue
					}

				} else {
					pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
				}

				// clearPTEEntries counts pte entries left (or made) invalid
				// so the pte page can be freed below if it ends up entirely
				// empty.
				clearPTEEntries := uint16(0)

				for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
					pteEntry := &pteEntries[pteIndex]
					if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
						// Skip over this non-present entry.
						clearPTEEntries++
						start += pteSize
						continue
					}

					if !w.visitor.visit(uintptr(start), pteEntry, pteSize-1) {
						return false
					}
					if !pteEntry.Valid() {
						if w.visitor.requiresAlloc() {
							panic("PTE not set after iteration with requiresAlloc!")
						}
						// The visitor cleared this entry.
						clearPTEEntries++
					}

					start += pteSize
				}

				// Free the pte page if every entry in it is now invalid.
				if clearPTEEntries == entriesPerPage {
					pmdEntry.Clear()
					w.pageTables.Allocator.FreePTEs(pteEntries)
					clearPMDEntries++
				}
			}

			// Free the pmd page if every entry in it is now invalid.
			if clearPMDEntries == entriesPerPage {
				pudEntry.Clear()
				w.pageTables.Allocator.FreePTEs(pmdEntries)
				clearPUDEntries++
			}
		}

		// Free the pud page if every entry in it is now invalid.
		if clearPUDEntries == entriesPerPage {
			pgdEntry.Clear()
			w.pageTables.Allocator.FreePTEs(pudEntries)
		}
	}
	return true
}
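
// The nested loops above decompose each address into one table index per
// level using the package's masks and shifts. The helper below makes that
// decomposition explicit; it is an illustrative sketch only (not part of the
// original file) and its name is hypothetical.
func emptyWalkerLevelIndices(addr uintptr) (pgd, pud, pmd, pte uint16) {
	pgd = uint16((addr & pgdMask) >> pgdShift)
	pud = uint16((addr & pudMask) >> pudShift)
	pmd = uint16((addr & pmdMask) >> pmdShift)
	pte = uint16((addr & pteMask) >> pteShift)
	return pgd, pud, pmd, pte
}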

// emptyWalker walks page tables.
type emptyWalker struct {
	// pageTables are the tables to walk.
	pageTables *PageTables

	// visitor is applied to each entry during the walk; it controls
	// allocation, splitting, and whether the walk continues.
	visitor emptyVisitor
}
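
// The walk above relies on only three visitor methods. The interface below
// is an illustrative sketch inferred from those call sites; the name and
// parameter names are hypothetical, and the concrete emptyVisitor is
// generated elsewhere in this package.
type emptyVisitorContract interface {
	// requiresAlloc reports whether missing levels must be allocated and
	// populated during the walk.
	requiresAlloc() bool

	// requiresSplit reports whether sect mappings must be split when the
	// walked range covers them only partially.
	requiresSplit() bool

	// visit is called with the start address, the entry, and the level's
	// alignment mask (e.g. pteSize-1); returning false aborts the walk.
	visit(start uintptr, pte *PTE, align uintptr) bool
}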

// iterateRange iterates over all appropriate levels of page tables for the given range.
//
// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
// exception is sect pages. If a valid sect page (huge or jumbo) cannot be
// installed, then the walk will continue to individual entries.
//
// This algorithm will attempt to maximize the use of sect pages whenever
// possible. Whether a sect page is provided will be clear through the range
// provided in the callback.
//
// Note that if requiresAlloc is true, then no gaps will be present. However,
// if alloc is not set, then the iteration will likely be full of gaps.
//
// Note that this function should generally be avoided in favor of Map, Unmap,
// etc. when not necessary.
//
// Precondition: start must be page-aligned.
// Precondition: start must be less than end.
// Precondition: If requiresAlloc is true, then start and end should not span
// non-canonical ranges. If they do, a panic will result.
//
//go:nosplit
func (w *emptyWalker) iterateRange(start, end uintptr) {
	if start%pteSize != 0 {
		panic("unaligned start")
	}
	if end < start {
		panic("start > end")
	}
	if start < lowerTop {
		if end <= lowerTop {
			// Range is entirely within the lower canonical half.
			w.iterateRangeCanonical(start, end)
		} else if end > lowerTop && end <= upperBottom {
			// Range runs into the non-canonical hole; only the canonical
			// part below lowerTop is walked.
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
			w.iterateRangeCanonical(start, lowerTop)
		} else {
			// Range spans the non-canonical hole; walk the two canonical
			// pieces on either side of it.
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
			if !w.iterateRangeCanonical(start, lowerTop) {
				return
			}
			w.iterateRangeCanonical(upperBottom, end)
		}
	} else if start < upperBottom {
		if end <= upperBottom {
			// Range lies entirely within the non-canonical hole; there is
			// nothing to walk.
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
		} else {
			// Range starts in the hole; walk only the canonical part above
			// upperBottom.
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
			w.iterateRangeCanonical(upperBottom, end)
		}
	} else {
		// Range is entirely within the upper canonical half.
		w.iterateRangeCanonical(start, end)
	}
}
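
// Worked example (illustrative, not part of the original file): for a range
// with start < lowerTop and end > upperBottom, the walk above is split into
// the two canonical pieces [start, lowerTop) and [upperBottom, end); the
// non-canonical hole between them is skipped, which is why requiresAlloc
// visitors panic instead of silently leaving a gap. A sketch of driving the
// walker, assuming pt and v already exist:
//
//	w := emptyWalker{pageTables: pt, visitor: v}
//	w.iterateRange(start, end) // start must be page-aligned and not exceed end.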

// emptynext returns the next address quantized by the given size, i.e. start
// rounded down to a size-aligned boundary and then advanced by size. size
// must be a power of two.
//
//go:nosplit
func emptynext(start uintptr, size uintptr) uintptr {
	start &= ^(size - 1)
	start += size
	return start
}
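
// emptynextExample illustrates emptynext's quantization; it is a hypothetical
// helper, not part of the original file. Note that an already-aligned address
// still advances by a full step.
func emptynextExample() {
	_ = emptynext(0x1234, 0x1000) // 0x2000: rounded down to 0x1000, then advanced by 0x1000.
	_ = emptynext(0x2000, 0x1000) // 0x3000: already aligned, still advances by 0x1000.
}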