// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugSelect = false

	// scase.kind
	caseRecv = iota
	caseSend
	caseDefault
)

// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
//
// The scase array is declared with length 1 but is really tcase entries
// long; the pollorder and lockorder arrays live in the same allocation,
// directly after the scase array (see selectsize and newselect).
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder **hchan  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc
	kind        uint16
	so          uint16 // vararg of selected bool
	receivedp   *bool  // pointer to received bool (recv2)
	releasetime int64
}

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)

// selectsize returns the number of bytes needed for an hselect holding
// size cases: the header (which embeds one scase) plus size-1 additional
// scases, then size lockorder pointers and size pollorder entries,
// rounded up to int64 alignment.
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, sys.Int64Align)
}

// newselect initializes sel for size cases. selsize is the allocation
// size computed by the caller (the compiler, or reflect_rselect) and
// must agree with selectsize(size). The lockorder and pollorder
// pointers are aimed into the tail of the same allocation, just past
// the scase array.
func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}

// selectsend registers a send case on channel c. The so value passed to
// the Impl is the byte offset from &sel to the caller's selected result
// slot; selectgo later stores true through that offset for the chosen case.
//go:nosplit
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// cut in half to give stack a chance to split
func selectsendImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectsend: too many cases")
	}
	sel.ncase = i + 1
	// Index into the variable-length scase array by hand; sel.scase is
	// declared [1]scase but really holds tcase entries.
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))

	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

// selectrecv registers a receive case on channel c (one-result form:
// v := <-c inside a select).
//go:nosplit
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// selectrecv2 registers a receive case that also reports whether a value
// was received (v, ok := <-c inside a select); received points at ok.
//go:nosplit
func selectrecv2(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// selectrecvImpl fills in the next scase slot as a receive case.
// received may be nil (selectrecv) or point at the caller's ok bool
// (selectrecv2).
func selectrecvImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

// selectdefault registers the default case.
//go:nosplit
func selectdefault(sel *hselect) (selected bool) {
	selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	return
}

// selectdefaultImpl fills in the next scase slot as the default case.
// Its channel is nil, which is why sellock/selunlock must tolerate nil
// entries in the lock order.
func selectdefaultImpl(sel *hselect, callerpc uintptr, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = callerpc
	cas.c = nil
	cas.so = uint16(so)
	cas.kind = caseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
	}
}

// sellock locks every distinct channel in sel's lockorder. The list is
// sorted by channel address (see selectgoImpl), so a channel that
// appears in several cases occupies adjacent slots and the c0 != c
// check locks it only once; nil entries (the default case) are skipped.
func sellock(sel *hselect) {
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	var c *hchan
	for _, c0 := range lockorder {
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(sel *hselect) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	n := int(sel.ncase)
	r := 0
	lockslice := slice{unsafe.Pointer(sel.lockorder), n, n}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// skip the default case
	if n > 0 && lockorder[0] == nil {
		r = 1
	}
	for i := n - 1; i >= r; i-- {
		c := lockorder[i]
		if i > 0 && c == lockorder[i-1] {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

// selparkcommit releases all of sel's channel locks; it is the unlock
// callback handed to gopark by selectgoImpl, invoked once this G is
// committed to sleeping.
func selparkcommit(gp *g, sel unsafe.Pointer) bool {
	selunlock((*hselect)(sel))
	return true
}

// block implements a select with no cases: it parks the goroutine forever.
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}

// overwrites return pc on stack to signal which case of the select
// to run, so cannot appear at the top of a split stack.
//go:nosplit
func selectgo(sel *hselect) {
	pc, offset := selectgoImpl(sel)
	// Set the chosen case's selected bool (at offset from &sel on the
	// caller's frame), then redirect the caller's return pc to the
	// chosen case's code.
	*(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
	setcallerpc(unsafe.Pointer(&sel), pc)
}

// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
func selectgoImpl(sel *hselect) (uintptr, uint16) {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}

	// View the variable-length scase array as an ordinary slice.
	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 1; i < int(sel.ncase); i++ {
		j := int(fastrand1()) % (i + 1)
		pollorder[i] = pollorder[j]
		pollorder[j] = uint16(i)
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// Phase 1: sift each channel up into a max-heap keyed by sortkey.
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		c := scases[j].c
		for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = c
	}
	// Phase 2: repeatedly move the max to the end and sift down.
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		c := lockorder[i]
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() {
				k++
			}
			if c.sortkey() < lockorder[k].sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = c
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(sel)

	var (
		gp     *g
		done   uint32 // shared CAS target for this select's sudogs (sg.selectdone)
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
	)

loop:
	// pass 1 - look for something already waiting
	var dfl *scase
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas.c

		switch cas.kind {
		case caseRecv:
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}

		case caseDefault:
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(sel)
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		// Link the sudog onto gp.waiting in case order (list ends up in
		// reverse, which pass 3 relies on).
		sg.waitlink = gp.waiting
		gp.waiting = sg

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, unsafe.Pointer(sel), "select", traceEvGoBlockSelect, 2)

	// someone woke us up
	sellock(sel)
	sg = (*sudog)(gp.param) // the sudog the waker chose, or nil on close
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in case order, so when
	// iterating through the linked list they are in reverse order.
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
	}
	gp.waiting = nil
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		k = &scases[pollorder[i]]
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// This can happen if we were woken up by a close().
		// TODO: figure that out explicitly so we don't need this loop.
		goto loop
	}

	c = cas.c

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}
	if msanenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.size)
		} else if cas.kind == caseSend {
			msanread(cas.elem, c.elemtype.size)
		}
	}

	selunlock(sel)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.size)
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	memclr(qp, uintptr(c.elemsize))
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(sel)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(sel)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(sel) })
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	goto retc

rclose:
	// read at end of closed channel
	selunlock(sel)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		memclr(cas.elem, uintptr(c.elemsize))
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	send(c, sg, cas.elem, func() { selunlock(sel) })
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	goto retc

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 2)
	}
	return cas.pc, cas.so

sclose:
	// send on closed channel
	selunlock(sel)
	panic("send on closed channel")
}

func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

// reflect_rselect implements reflect.rselect. It allocates an hselect
// sized for len(cases), registers each case, and runs the select. The
// case index i is stored in the scase pc field, so the pc value returned
// by selectgoImpl is the index of the chosen case.
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*hselect)(mallocgc(size, nil, flagNoScan))
	newselect(sel, int64(size), int32(len(cases)))
	// r receives the "ok" result for whichever recv case (if any) fires.
	r := new(bool)
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefaultImpl(sel, uintptr(i), 0)
		case selectSend:
			if rc.ch == nil {
				break // nil channel: case never fires
			}
			selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
		case selectRecv:
			if rc.ch == nil {
				break // nil channel: case never fires
			}
			selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
		}
	}

	pc, _ := selectgoImpl(sel)
	chosen = int(pc)
	recvOK = *r
	return
}

// dequeueSudoG removes sgp from q, wherever it sits: middle, tail, or
// head of the list, or already removed (no-op unless it is the sole
// remaining element, disambiguated via q.first).
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}