github.com/jspc/eggos@v0.5.1-0.20221028160421-556c75c878a5/kernel/thread.go

package kernel

import (
	"unsafe"

	"github.com/jspc/eggos/kernel/mm"
	"github.com/jspc/eggos/kernel/sys"
)

const (
	_NTHREDS = 20

	_FLAGS_IF        = 0x200
	_FLAGS_IOPL_USER = 0x3000

	_RPL_USER = 3

	_THREAD_STACK_SIZE         = 32 << 10
	_THREAD_STACK_GUARD_OFFSET = 1 << 10

	_CLONE_IDLE = 0x8000000000000000
)

const (
	UNUSED = iota
	INITING
	SLEEPING
	RUNNABLE
	RUNNING
	EXIT
)

const (
	_TSS_ESP0 = 1
	_TSS_SS0  = 2
)

var (
	threads    [_NTHREDS]Thread
	scheduler  *context
	idleThread threadptr
)

//go:notinheap
type context struct {
	r15 uintptr
	r14 uintptr
	r13 uintptr
	r12 uintptr
	r11 uintptr
	bx  uintptr
	bp  uintptr
	ip  uintptr
}

// The offsets of threadTLS and fpstate must stay in sync with trap.s and syscall.s.
type Thread struct {
	// threadTLS holds the thread TLS; slot 0 stores the pointer back to this Thread
	threadTLS [4]uintptr

	// saved FPU state
	fpstate uintptr

	kstack uintptr
	stack  uintptr
	tf     *trapFrame

	context *context
	id      int
	state   int
	counter int64

	// sysmon calls usleep, which in turn calls sleepon. If sleepKey were a pointer it
	// would trigger gcWriteBarrier, and since sysmon runs without a P that would
	// cause a nil pointer dereference.
	sleepKey uintptr
	// for sleep timeout
	timerKey uintptr

	// store goroutine tls
	fsBase uintptr

	// systf saves the trap frame of a syscall that needs to be forwarded
	systf trapFrame
}

//go:nosplit
func allocThread() *Thread {
	var t *Thread
	for i := 0; i < _NTHREDS; i++ {
		tt := &threads[i]
		if tt.state == UNUSED {
			t = tt
			t.id = i
			break
		}
	}
	if t == nil {
		throw("no thread slot available")
	}

	t.state = INITING
	t.kstack = allocThreadStack()
	t.fpstate = mm.Alloc()
	t.threadTLS[0] = uintptr(unsafe.Pointer(t))
	return t
}

//go:nosplit
func allocThreadStack() uintptr {
	stack := mm.Mmap(0, _THREAD_STACK_SIZE)
	stack += _THREAD_STACK_SIZE - _THREAD_STACK_GUARD_OFFSET
	return stack
}

type threadptr uintptr

//go:nosplit
func (t threadptr) ptr() *Thread {
	return (*Thread)(unsafe.Pointer(t))
}

//go:nosplit
func setFS(addr uintptr) {
	wrmsr(_MSR_FS_BASE, addr)
}

//go:nosplit
func setGS(addr uintptr) {
	wrmsr(_MSR_GS_BASE, addr)
}

//go:nosplit
func Mythread() *Thread

//go:nosplit
func setMythread(t *Thread) {
	switchThreadContext(t)
}

//go:nosplit
func switchThreadContext(t *Thread) {
	// set go tls base address
	if t.fsBase != 0 {
		setFS(t.fsBase)
	}
	// set current thread base address
	setGS(uintptr(unsafe.Pointer(&t.threadTLS)))

	// use current thread's esp0 in tss
	setTssSP0(t.kstack)
}

//go:nosplit
func thread0Init() {
	t := allocThread()
	t.stack = allocThreadStack()

	sp := t.kstack

	// reserve space for the trap frame
	sp -= unsafe.Sizeof(trapFrame{})
	tf := (*trapFrame)(unsafe.Pointer(sp))

	// Because trapret restores the FPU state,
	// we need a valid fpstate here.
	sys.Fxsave(t.fpstate)
	tf.SS = _UDATA_IDX<<3 | _RPL_USER
	tf.SP = t.stack
	// enable interrupts and io port access
	// TODO: enable interrupt
	tf.FLAGS = _FLAGS_IF | _FLAGS_IOPL_USER
	// tf.FLAGS = _FLAGS_IF
	tf.CS = _UCODE_IDX<<3 | _RPL_USER
	tf.IP = sys.FuncPC(thread0)
	t.tf = tf

	// reserve space for the context
	sp -= unsafe.Sizeof(*t.context)
	ctx := (*context)(unsafe.Pointer(sp))
	ctx.ip = sys.FuncPC(trapret)
	t.context = ctx

	t.state = RUNNABLE
}
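// The layout below is an illustrative sketch of the kernel stack that
// thread0Init above (and clone further down) prepare: the context sits just
// below the trap frame, and ctx.ip points at trapret, so the scheduler's first
// swtch into the thread "returns" into trapret, which restores the trap frame
// and enters the thread proper. Sizes are whatever unsafe.Sizeof yields at the
// call sites.
//
//	t.kstack (high addresses)
//	+-----------+
//	| trapFrame |  <- t.tf, restored by trapret
//	+-----------+
//	| context   |  <- t.context, ip = FuncPC(trapret)
//	+-----------+
//	(lower addresses)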
//go:nosplit
func ksysClone(pc, stack, flags uintptr) uintptr

//go:nosplit
func ksysYield()

// thread0 is the first thread
//go:nosplit
func thread0() {
	// jump to go rt0
	go_entry()
	panic("main return")
}

// idleInit runs after main init
func idleInit() {
	// thread0 clones the idle thread
	stack := mm.SysMmap(0, _THREAD_STACK_SIZE) +
		_THREAD_STACK_SIZE - _THREAD_STACK_GUARD_OFFSET

	tid := ksysClone(sys.FuncPC(idle), stack, _CLONE_IDLE)
	idleThread = (threadptr)(unsafe.Pointer(&threads[tid]))
}

//go:nosplit
func idle() {
	for {
		if sys.CS() != 8 {
			throw("bad cs in idle thread")
		}
		sys.Hlt()
		ksysYield()
	}
}

//go:nosplit
func clone(pc, usp, flags, tls uintptr) int {
	my := Mythread()
	chld := allocThread()

	sp := chld.kstack
	// reserve space for the trap frame
	sp -= unsafe.Sizeof(trapFrame{})
	tf := (*trapFrame)(unsafe.Pointer(sp))
	*tf = *my.tf

	// copy fpstate
	fpsrc := (*[512]byte)(unsafe.Pointer(my.fpstate))
	fpdst := (*[512]byte)(unsafe.Pointer(chld.fpstate))
	*fpdst = *fpsrc

	tf.SP = usp
	tf.IP = pc
	tf.AX = 0
	// the idle thread runs in ring0 because it relies on the HLT instruction
	if flags&_CLONE_IDLE != 0 {
		tf.CS = _KCODE_IDX << 3
		tf.SS = _KDATA_IDX << 3
	}

	// reserve space for the context
	sp -= unsafe.Sizeof(context{})
	ctx := (*context)(unsafe.Pointer(sp))
	ctx.ip = sys.FuncPC(trapret)

	chld.context = ctx
	// *(*uintptr)(unsafe.Pointer(&chld.context)) = sp
	chld.tf = tf
	chld.stack = usp
	chld.fsBase = tls
	chld.state = RUNNABLE
	return chld.id
}

//go:nosplit
func exit() {
	t := Mythread()
	t.state = EXIT
	Yield()
	// TODO: handle thread exit in scheduler
}

//go:nosplit
func threadInit() {
	thread0Init()
}

//go:nosplit
func swtch(old **context, _new *context)

//go:nosplit
func schedule() {
	var t *Thread
	var idx int
	for {
		t = pickup(&idx)
		switchto(t)
	}
}

// pickup selects the next runnable thread
//go:nosplit
func pickup(pidx *int) *Thread {
	curr := *pidx
	if traptask != 0 && traptask.ptr().state == RUNNABLE {
		return traptask.ptr()
	}
	if syscalltask != 0 && syscalltask.ptr().state == RUNNABLE {
		return syscalltask.ptr()
	}

	var t *Thread
	for i := 0; i < _NTHREDS; i++ {
		idx := (curr + i + 1) % _NTHREDS
		*pidx = idx
		tt := &threads[idx]
		if tt.state == RUNNABLE && tt != idleThread.ptr() {
			t = tt
			break
		}
	}
	if t == nil {
		t = idleThread.ptr()
	}
	if t == nil {
		throw("no runnable thread")
	}
	return t
}

// switchto switches the thread context from the scheduler to t
//go:nosplit
func switchto(t *Thread) {
	begin := nanosecond()
	// assert that interrupts are enabled
	// TODO: enable check
	if t.tf != nil && t.tf.FLAGS&0x200 == 0 {
		throw("bad eflags")
	}
	setMythread(t)
	t.state = RUNNING

	if t == idleThread.ptr() && t.tf.CS != 8 {
		throw("bad idle cs")
	}
	swtch(&scheduler, t.context)
	used := nanosecond() - begin
	t.counter += used
}

// ThreadStat reports the accumulated CPU time (in nanoseconds) of each thread slot.
func ThreadStat(stat *[_NTHREDS]int64) {
	for i := 0; i < _NTHREDS; i++ {
		stat[i] = threads[i].counter
	}
}

//go:nosplit
func Sched() {
	my := Mythread()
	swtch(&my.context, scheduler)
}

//go:nosplit
func Yield() {
	my := Mythread()
	my.state = RUNNABLE
	swtch(&my.context, scheduler)
}
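// A minimal usage sketch for ThreadStat, assuming a caller that imports
// github.com/jspc/eggos/kernel; the loop and the printed labels are
// hypothetical and not part of this package:
//
//	var stat [20]int64 // 20 must match _NTHREDS
//	kernel.ThreadStat(&stat)
//	for id, ns := range stat {
//		if ns != 0 {
//			println("thread", id, "cpu ns:", ns)
//		}
//	}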