github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/ring0/pagetables/walker_map_amd64.go

//go:build amd64
// +build amd64

package pagetables

// iterateRangeCanonical walks a canonical range.
//
//go:nosplit
func (w *mapWalker) iterateRangeCanonical(start, end uintptr) bool {
	for pgdIndex := uint16((start & pgdMask) >> pgdShift); start < end && pgdIndex < entriesPerPage; pgdIndex++ {
		var (
			pgdEntry   = &w.pageTables.root[pgdIndex]
			pudEntries *PTEs
		)
		if !pgdEntry.Valid() {
			if !w.visitor.requiresAlloc() {
				// Skip over this entry.
				start = mapnext(start, pgdSize)
				continue
			}

			// Allocate a new pgd.
			pudEntries = w.pageTables.Allocator.NewPTEs()
			pgdEntry.setPageTable(w.pageTables, pudEntries)
		} else {
			pudEntries = w.pageTables.Allocator.LookupPTEs(pgdEntry.Address())
		}

		// Map the next level.
		clearPUDEntries := uint16(0)

		for pudIndex := uint16((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ {
			var (
				pudEntry   = &pudEntries[pudIndex]
				pmdEntries *PTEs
			)
			if !pudEntry.Valid() {
				if !w.visitor.requiresAlloc() {
					// Skip over this entry.
					clearPUDEntries++
					start = mapnext(start, pudSize)
					continue
				}

				// This level has 1-GB super pages. If the
				// remaining region is aligned and at least as
				// large as a single PUD entry, we can skip
				// allocating a page for the pmd.
				if start&(pudSize-1) == 0 && end-start >= pudSize {
					pudEntry.SetSuper()
					if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
						return false
					}
					if pudEntry.Valid() {
						start = mapnext(start, pudSize)
						continue
					}
				}

				// Allocate a new pud.
				pmdEntries = w.pageTables.Allocator.NewPTEs()
				pudEntry.setPageTable(w.pageTables, pmdEntries)

			} else if pudEntry.IsSuper() {
				// Does this page need to be split?
				if w.visitor.requiresSplit() && (start&(pudSize-1) != 0 || end < mapnext(start, pudSize)) {
					// Install the relevant entries.
					pmdEntries = w.pageTables.Allocator.NewPTEs()
					for index := uint16(0); index < entriesPerPage; index++ {
						pmdEntries[index].SetSuper()
						pmdEntries[index].Set(
							pudEntry.Address()+(pmdSize*uintptr(index)),
							pudEntry.Opts())
					}
					pudEntry.setPageTable(w.pageTables, pmdEntries)
				} else {
					// A super page to be visited directly.
					if !w.visitor.visit(uintptr(start&^(pudSize-1)), pudEntry, pudSize-1) {
						return false
					}

					// Might have been cleared by the visitor.
					if !pudEntry.Valid() {
						clearPUDEntries++
					}

					start = mapnext(start, pudSize)
					continue
				}
			} else {
				pmdEntries = w.pageTables.Allocator.LookupPTEs(pudEntry.Address())
			}

			// Map the next level, since this is valid.
			clearPMDEntries := uint16(0)

			for pmdIndex := uint16((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ {
				var (
					pmdEntry   = &pmdEntries[pmdIndex]
					pteEntries *PTEs
				)
				if !pmdEntry.Valid() {
					if !w.visitor.requiresAlloc() {
						// Skip over this entry.
						clearPMDEntries++
						start = mapnext(start, pmdSize)
						continue
					}

					// This level has 2-MB huge pages. If the
					// remaining region is aligned and at least as
					// large as a single PMD entry, we can skip
					// allocating a page for the pte.
					if start&(pmdSize-1) == 0 && end-start >= pmdSize {
						pmdEntry.SetSuper()
						if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
							return false
						}
						if pmdEntry.Valid() {
							start = mapnext(start, pmdSize)
							continue
						}
					}

					// Allocate a new pmd.
					pteEntries = w.pageTables.Allocator.NewPTEs()
					pmdEntry.setPageTable(w.pageTables, pteEntries)

				} else if pmdEntry.IsSuper() {
					// Does this page need to be split?
					if w.visitor.requiresSplit() && (start&(pmdSize-1) != 0 || end < mapnext(start, pmdSize)) {
						// Install the relevant entries.
						pteEntries = w.pageTables.Allocator.NewPTEs()
						for index := uint16(0); index < entriesPerPage; index++ {
							pteEntries[index].Set(
								pmdEntry.Address()+(pteSize*uintptr(index)),
								pmdEntry.Opts())
						}
						pmdEntry.setPageTable(w.pageTables, pteEntries)
					} else {
						// A huge page to be visited directly.
						if !w.visitor.visit(uintptr(start&^(pmdSize-1)), pmdEntry, pmdSize-1) {
							return false
						}

						// Might have been cleared by the visitor.
						if !pmdEntry.Valid() {
							clearPMDEntries++
						}

						start = mapnext(start, pmdSize)
						continue
					}
				} else {
					pteEntries = w.pageTables.Allocator.LookupPTEs(pmdEntry.Address())
				}

				// Map the next level, since this is valid.
				clearPTEEntries := uint16(0)

				for pteIndex := uint16((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ {
					var (
						pteEntry = &pteEntries[pteIndex]
					)
					if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
						clearPTEEntries++
						start += pteSize
						continue
					}

					if !w.visitor.visit(uintptr(start&^(pteSize-1)), pteEntry, pteSize-1) {
						return false
					}
					if !pteEntry.Valid() && !w.visitor.requiresAlloc() {
						clearPTEEntries++
					}

					start += pteSize
					continue
				}

				// Check if we no longer need this page.
				if clearPTEEntries == entriesPerPage {
					pmdEntry.Clear()
					w.pageTables.Allocator.FreePTEs(pteEntries)
					clearPMDEntries++
				}
			}

			// Check if we no longer need this page.
			if clearPMDEntries == entriesPerPage {
				pudEntry.Clear()
				w.pageTables.Allocator.FreePTEs(pmdEntries)
				clearPUDEntries++
			}
		}

		// Check if we no longer need this page.
		if clearPUDEntries == entriesPerPage {
			pgdEntry.Clear()
			w.pageTables.Allocator.FreePTEs(pudEntries)
		}
	}
	return true
}
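
// For illustration (the address below is an arbitrary example, not taken from
// this file): the loop headers above decompose a canonical virtual address
// into four 9-bit table indices, one per level of the amd64 four-level
// layout. For a sample address va:
//
//	va := uintptr(0x00007f8000201000)
//	pgdIndex := uint16((va & pgdMask) >> pgdShift) // bits 47-39: index into the root (PGD) page.
//	pudIndex := uint16((va & pudMask) >> pudShift) // bits 38-30: index into the PUD page.
//	pmdIndex := uint16((va & pmdMask) >> pmdShift) // bits 29-21: index into the PMD page.
//	pteIndex := uint16((va & pteMask) >> pteShift) // bits 20-12: index into the PTE page.
//
// Each nested loop resumes at the index derived from the current start, so
// the walk visits each entry in the range at most once.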

// mapWalker walks page tables.
type mapWalker struct {
	// pageTables are the tables to walk.
	pageTables *PageTables

	// visitor is the set of arguments.
	visitor mapVisitor
}

// iterateRange iterates over all appropriate levels of page tables for the given range.
//
// If requiresAlloc is true, then Set _must_ be called on all given PTEs. The
// exception is super pages. If a valid super page (huge or jumbo) cannot be
// installed, then the walk will continue to individual entries.
//
// This algorithm will attempt to maximize the use of super pages whenever
// possible. Whether a super page was provided is apparent from the size of
// the range passed to the callback.
//
// Note that if requiresAlloc is true, then no gaps will be present. However,
// if requiresAlloc is false, then the iteration will likely be full of gaps.
//
// Note that this function should generally be avoided in favor of Map, Unmap,
// etc. when direct iteration is not necessary.
//
// Precondition: start must be page-aligned.
// Precondition: start must be less than end.
// Precondition: If requiresAlloc is true, then start and end should not span
// non-canonical ranges. If they do, a panic will result.
//
//go:nosplit
func (w *mapWalker) iterateRange(start, end uintptr) {
	if start%pteSize != 0 {
		panic("unaligned start")
	}
	if end < start {
		panic("start > end")
	}
	if start < lowerTop {
		if end <= lowerTop {
			w.iterateRangeCanonical(start, end)
		} else if end > lowerTop && end <= upperBottom {
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
			w.iterateRangeCanonical(start, lowerTop)
		} else {
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
			if !w.iterateRangeCanonical(start, lowerTop) {
				return
			}
			w.iterateRangeCanonical(upperBottom, end)
		}
	} else if start < upperBottom {
		if end <= upperBottom {
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
		} else {
			if w.visitor.requiresAlloc() {
				panic("alloc spans non-canonical range")
			}
			w.iterateRangeCanonical(upperBottom, end)
		}
	} else {
		w.iterateRangeCanonical(start, end)
	}
}
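
// For example (assuming the standard amd64 split, where lowerTop ends the
// lower canonical half and upperBottom begins the upper one): a non-alloc
// walk over a range straddling the non-canonical hole, such as
// [lowerTop-pteSize, upperBottom+pteSize), takes the third branch above and
// is performed as two canonical walks with the hole skipped:
//
//	w.iterateRangeCanonical(start, lowerTop)  // tail of the lower half.
//	w.iterateRangeCanonical(upperBottom, end) // head of the upper half.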

// mapnext returns the next address quantized by the given size.
//
//go:nosplit
func mapnext(start uintptr, size uintptr) uintptr {
	start &= ^(size - 1)
	start += size
	return start
}
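
// For example, with pmdSize == 2 MiB (1 << 21):
//
//	mapnext(0x201000, pmdSize) // aligns down to 0x200000 and returns 0x400000.
//	mapnext(0x400000, pmdSize) // already aligned; returns 0x600000.
//
// An already-aligned start still advances by a full size, which is how the
// skip paths in iterateRangeCanonical guarantee forward progress.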