github.com/icexin/eggos@v0.4.2-0.20220216025428-78b167e4f349/kernel/mm/mm.go (about)

     1  package mm
     2  
     3  import (
     4  	"unsafe"
     5  
     6  	"github.com/icexin/eggos/drivers/multiboot"
     7  	"github.com/icexin/eggos/kernel/sys"
     8  )
     9  
const (
	// PGSIZE is the size of a physical/virtual page (4 KiB).
	PGSIZE = 4 << 10
	// The first 100 MiB of physical memory are reserved for the kernel image.
	MEMSTART = 100 << 20
	// Default end address of usable physical memory; the value reported
	// by grub (multiboot) is used instead when it is available.
	DEFAULT_MEMTOP = 256 << 20
	// Start address of the kernel virtual memory area handed out by sbrk.
	VMSTART = 1 << 30

	// x86 page-table entry flag bits.
	PTE_P = 0x001 // present
	PTE_W = 0x002 // writable
	PTE_U = 0x004 // user-accessible

	// Number of entries in one page-table page.
	_ENTRY_NUMBER = PGSIZE / sys.PtrSize
)
    25  
var (
	// memtop is the end address of usable physical memory, set in Init.
	memtop uintptr

	// kmm is the kernel physical-page allocator; voffset is the next
	// unreserved virtual address returned by sbrk.
	kmm = kmmt{voffset: VMSTART}
	// vmm owns the page tables.
	vmm vmmt
)
    32  
// pageEnable turns paging on (implemented in assembly).
//
//go:nosplit
func pageEnable()

// lcr3 loads topPage into the CR3 register, installing the page table
// and flushing the TLB (implemented in assembly).
//
//go:nosplit
func lcr3(topPage *entryPage)

// throw halts the kernel with msg; linked to kernel.throw.
//
//go:linkname throw github.com/icexin/eggos/kernel.throw
func throw(msg string)
    41  
    42  //go:nosplit
    43  func pageRoundUp(size uintptr) uintptr {
    44  	return (size + PGSIZE - 1) &^ (PGSIZE - 1)
    45  }
    46  
    47  //go:nosplit
    48  func pageRoundDown(v uintptr) uintptr {
    49  	return v &^ (PGSIZE - 1)
    50  }
    51  
    52  //go:nosplit
    53  func pageEntryIdx(v uintptr, lvl int) uintptr {
    54  	return (v >> (12 + (lvl-1)*9)) & (_ENTRY_NUMBER - 1)
    55  }
    56  
// page is a node of the free-page list; it lives in the first word of
// the free physical page itself, so no extra memory is needed.
//
//go:notinheap
type page struct {
	next *page
}
    61  
// kmmstat holds allocator counters.
type kmmstat struct {
	// alloc counts pages handed out by kmmt.alloc
	alloc int
}

// kmmt is the kernel physical-page allocator plus the virtual-address
// reservation cursor used by sbrk.
type kmmt struct {
	freelist *page   // singly-linked list of free physical pages
	voffset  uintptr // next unreserved virtual address
	stat     kmmstat
}
    71  
    72  //go:nosplit
    73  func (k *kmmt) sbrk(n uintptr) uintptr {
    74  	p := k.voffset
    75  	k.voffset = pageRoundUp(k.voffset + n)
    76  	if k.voffset < p {
    77  		throw("virtual memory address all used")
    78  	}
    79  	return p
    80  }
    81  
    82  //go:nosplit
    83  func (k *kmmt) alloc() uintptr {
    84  	r := k.freelist
    85  	if r == nil {
    86  		throw("kmemt.alloc")
    87  	}
    88  	k.stat.alloc++
    89  	k.freelist = r.next
    90  	return uintptr(unsafe.Pointer(r))
    91  }
    92  
    93  //go:nosplit
    94  func (k *kmmt) freeRange(start, end uintptr) {
    95  	p := pageRoundUp(start)
    96  	for ; p+PGSIZE <= end; p += PGSIZE {
    97  		k.free(p)
    98  	}
    99  }
   100  
   101  //go:nosplit
   102  func (k *kmmt) free(p uintptr) {
   103  	if p%PGSIZE != 0 || p >= memtop {
   104  		throw("kmemt.free")
   105  	}
   106  	r := (*page)(unsafe.Pointer(p))
   107  	r.next = k.freelist
   108  	k.freelist = r
   109  }
   110  
// entryPage is one 4 KiB page-table page holding _ENTRY_NUMBER entries.
//
//go:notinheap
type entryPage [_ENTRY_NUMBER]entry

// entry is a single x86 page-table entry: a page-aligned physical
// address in the high bits plus PTE_* flag bits in the low 12 bits.
type entry uintptr
   115  
   116  //go:nosplit
   117  func (p entry) present() bool {
   118  	return p&PTE_P != 0
   119  }
   120  
   121  //go:nosplit
   122  func (p entry) addr() uintptr {
   123  	return uintptr(p) &^ 0xfff
   124  }
   125  
   126  //go:nosplit
   127  func (p entry) entryPage() *entryPage {
   128  	return (*entryPage)(unsafe.Pointer(p.addr()))
   129  }
   130  
// vmmt manages the virtual memory map via a four-level page table.
type vmmt struct {
	// topPage is the root (level-4) page-table page loaded into CR3.
	topPage *entryPage
}
   134  
   135  //go:nosplit
   136  func (v *vmmt) munmap(va, size uintptr) bool {
   137  	// println("mumap va=", va, " size=", size)
   138  	p := pageRoundDown(va)
   139  	last := pageRoundDown(va + size - 1)
   140  	for ; p != last; p += PGSIZE {
   141  		pte := v.walkpgdir(p, false)
   142  		if pte == nil {
   143  			return false
   144  		}
   145  		if !pte.present() {
   146  			return false
   147  		}
   148  		kmm.free(pte.addr())
   149  		*pte = 0
   150  	}
   151  	return true
   152  }
   153  
   154  //go:nosplit
   155  func (v *vmmt) mmap(va, size, perm uintptr) bool {
   156  	// println("mmap va=", unsafe.Pointer(va), " size=", size>>10)
   157  	var pa uintptr
   158  	p := pageRoundDown(va)
   159  	last := pageRoundDown(va + size - 1)
   160  	for {
   161  		pte := v.walkpgdir(p, true)
   162  		if pte == nil {
   163  			return false
   164  		}
   165  		if pte.present() {
   166  			throw("mmap remap")
   167  		} else {
   168  			pa = kmm.alloc()
   169  			sys.Memclr(pa, PGSIZE)
   170  			*pte = entry(pa | perm)
   171  		}
   172  		if p == last {
   173  			break
   174  		}
   175  		p += PGSIZE
   176  	}
   177  	return true
   178  }
   179  
// Sbrk reserves n bytes of kernel virtual address space and returns its
// start address. The range is only reserved, not mapped; use Mmap to
// back it with physical pages.
//
//go:nosplit
func Sbrk(n uintptr) uintptr {
	return kmm.sbrk(n)
}
   184  
// Mmap maps size bytes at virtual address va — or at a freshly reserved
// address when va is 0 — with present/writable/user permission, reloads
// CR3 to flush the TLB, and returns the mapped address.
//
//go:nosplit
func Mmap(va, size uintptr) uintptr {
	if va == 0 {
		va = kmm.sbrk(size)
	}
	vmm.mmap(va, size, PTE_P|PTE_W|PTE_U)
	// flush page table cache
	lcr3(vmm.topPage)
	return va
}
   195  
// Munmap unmaps size bytes at va, frees the backing physical pages,
// reloads CR3 to flush the TLB, and reports whether the whole range was
// mapped.
//
//go:nosplit
func Munmap(va, size uintptr) bool {
	ok := vmm.munmap(va, size)
	lcr3(vmm.topPage)
	return ok
}
   202  
// Fixmap maps size bytes of physical memory starting at pa to the
// virtual range starting at va with present/writable/user permission,
// then reloads CR3 to flush the TLB.
//
//go:nosplit
func Fixmap(va, pa, size uintptr) {
	vmm.fixmap(va, pa, size, PTE_P|PTE_W|PTE_U)
	// flush page table cache
	lcr3(vmm.topPage)
}
   209  
   210  //go:nosplit
   211  func Alloc() uintptr {
   212  	ptr := kmm.alloc()
   213  	buf := sys.UnsafeBuffer(ptr, PGSIZE)
   214  	for i := range buf {
   215  		buf[i] = 0
   216  	}
   217  	return ptr
   218  }
   219  
   220  //go:nosplit
   221  func (v *vmmt) fixmap(va, pa, size, perm uintptr) bool {
   222  	p := pageRoundDown(va)
   223  	last := pageRoundDown(va + size - 1)
   224  	for {
   225  		pte := v.walkpgdir(p, true)
   226  		if pte == nil {
   227  			return false
   228  		}
   229  		if pte.present() {
   230  			throw("fixmap remap")
   231  		}
   232  		*pte = entry(pa | perm)
   233  		if p == last {
   234  			break
   235  		}
   236  		p += PGSIZE
   237  		pa += PGSIZE
   238  	}
   239  	return true
   240  }
   241  
// walkpglvl returns the entry for va within the level-lvl page-table
// page pg. For lvl > 1 the entry refers to the next-level page-table
// page; when it is absent and alloc is true, a fresh zeroed page is
// allocated and installed. Returns nil when the entry is absent and
// alloc is false, or when page allocation returns 0.
//
//go:nosplit
func (v *vmmt) walkpglvl(pg *entryPage, va uintptr, lvl int, alloc bool) *entry {
	idx := pageEntryIdx(va, lvl)
	if int(idx) >= len(pg) {
		throw("bad page index")
	}
	pe := &pg[idx]
	// level-1 entries map data pages; return them as-is, present or not
	if lvl == 1 {
		return pe
	}

	// find entry
	if pe.present() {
		return pe
	}
	// not found and no alloc
	if !alloc {
		return nil
	}
	// alloc a page to map entry page
	addr := kmm.alloc()
	if addr == 0 {
		return nil
	}
	sys.Memclr(addr, PGSIZE)
	// map new page to entry; intermediate page-table pages are always
	// installed present+writable+user
	*pe = entry(addr | PTE_P | PTE_W | PTE_U)
	// log.PrintHex(uintptr(unsafe.Pointer(pg)))
	// log.PrintStr("@")
	// log.PrintHex(idx)
	// log.PrintStr("=")
	// log.PrintHex(addr)
	// log.PrintStr("\n")
	return pe
}
   277  
   278  //go:nosplit
   279  func (v *vmmt) walkpgdir(va uintptr, alloc bool) *entry {
   280  	epg := v.topPage
   281  	var pe *entry
   282  	for i := 4; i >= 1; i-- {
   283  		pe = v.walkpglvl(epg, va, i, alloc)
   284  		if pe == nil {
   285  			return nil
   286  		}
   287  		epg = pe.entryPage()
   288  	}
   289  	return pe
   290  }
   291  
   292  //go:nosplit
   293  func findMemTop() uintptr {
   294  	if !multiboot.Enabled() {
   295  		return DEFAULT_MEMTOP
   296  	}
   297  	var top uintptr
   298  	for _, e := range multiboot.BootInfo.MmapEntries() {
   299  		if e.Type != multiboot.MemoryAvailable {
   300  			continue
   301  		}
   302  		ptop := e.Addr + e.Len
   303  		if ptop > VMSTART {
   304  			ptop = VMSTART
   305  		}
   306  		if top < uintptr(ptop) {
   307  			top = uintptr(ptop)
   308  		}
   309  	}
   310  	if top == 0 {
   311  		return DEFAULT_MEMTOP
   312  	}
   313  	return top
   314  }
   315  
// Init bootstraps memory management: it determines the usable physical
// memory range, seeds the page allocator with [MEMSTART, memtop),
// builds the top-level page table, identity-maps physical memory, and
// enables paging. Must run before any other function in this package.
//
//go:nosplit
func Init() {
	memtop = findMemTop()
	kmm.voffset = VMSTART
	kmm.freeRange(MEMSTART, memtop)

	vmm.topPage = (*entryPage)(unsafe.Pointer(kmm.alloc()))
	sys.Memclr(uintptr(unsafe.Pointer(vmm.topPage)), PGSIZE)
	// Identity-map physical memory from 4096 up to memtop so the kernel
	// can reach every physical address. Page 0 is left unmapped —
	// presumably to trap nil-pointer dereferences; confirm before relying
	// on it.
	vmm.fixmap(4096, 4096, memtop-4096, PTE_P|PTE_W|PTE_U)

	lcr3(vmm.topPage)
	pageEnable()
}