// rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/runtime/select.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import "unsafe"

const (
	debugSelect = false

	// scase.kind
	caseRecv = iota
	caseSend
	caseDefault
)

// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
//
// An hselect is the header of one contiguous allocation (see selectsize):
// the header itself (including one scase), then tcase-1 further scases,
// then the lockorder array, then the pollorder array. pollorder and
// lockorder therefore point into the tail of the same allocation
// (see newselect), not at separately allocated arrays.
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder **hchan  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc
	kind        uint16
	so          uint16 // vararg of selected bool
	receivedp   *bool  // pointer to received bool (recv2)
	releasetime int64
}

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)

// selectsize returns the number of bytes needed for a select with size
// cases: the hselect header (which embeds one scase), size-1 additional
// scases, plus size lockorder (*hchan) and size pollorder (uint16) slots,
// rounded up for int64 alignment.
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, _Int64Align)
}

// newselect initializes a caller-allocated select descriptor for size
// cases, wiring lockorder and pollorder to point at their regions inside
// the single allocation laid out by selectsize. selsize is cross-checked
// against selectsize so a compiler/runtime layout mismatch is caught.
func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}

// selectsend registers a send case. The compiler later reads the result
// slot: `so` records the byte offset from &sel to &selected on the
// caller's frame, and selectgo stores true through that offset for the
// case that fires.
//go:nosplit
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// cut in half to give stack a chance to split
func selectsendImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectsend: too many cases")
	}
	sel.ncase = i + 1
	// Address the i'th scase manually; sel.scase is declared [1]scase but
	// the allocation really holds tcase entries (see selectsize).
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))

	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

// selectrecv registers a one-result receive case (v := <-c).
//go:nosplit
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// selectrecv2 registers a two-result receive case (v, ok := <-c);
// received points at the caller's ok slot.
//go:nosplit
func selectrecv2(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

func selectrecvImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

// selectdefault registers the default case (no channel).
//go:nosplit
func selectdefault(sel *hselect) (selected bool) {
	selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	return
}

func selectdefaultImpl(sel *hselect, callerpc uintptr, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = callerpc
	cas.c = nil // nil channel marks the default case (skipped in sellock/selunlock)
	cas.so = uint16(so)
	cas.kind = caseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
	}
}

// sellock locks every channel in the select, in lockorder. lockorder is
// sorted by channel address (see selectgoImpl), so duplicate channels are
// adjacent; the c0 != c check locks each distinct channel exactly once.
// nil entries (the default case) are skipped.
func sellock(sel *hselect) {
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	var c *hchan
	for _, c0 := range lockorder {
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(sel *hselect) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	n := int(sel.ncase)
	r := 0
	lockslice := slice{unsafe.Pointer(sel.lockorder), n, n}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// skip the default case
	if n > 0 && lockorder[0] == nil {
		r = 1
	}
	// Unlock in reverse lock order, skipping duplicates of the same channel
	// (they are adjacent because lockorder is sorted by address).
	for i := n - 1; i >= r; i-- {
		c := lockorder[i]
		if i > 0 && c == lockorder[i-1] {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

// selparkcommit is the gopark unlock function: it releases all select
// locks after the G is committed to sleeping. After this returns, sel
// may be freed by another M (see the comment in selunlock).
func selparkcommit(gp *g, sel unsafe.Pointer) bool {
	selunlock((*hselect)(sel))
	return true
}

// block implements `select {}`: park forever.
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}

// overwrites return pc on stack to signal which case of the select
// to run, so cannot appear at the top of a split stack.
//go:nosplit
func selectgo(sel *hselect) {
	pc, offset := selectgoImpl(sel)
	// Set the caller's `selected` bool for the winning case (offset is
	// scase.so, recorded relative to &sel), then redirect the caller's
	// return pc to the winning case's recorded pc.
	*(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
	setcallerpc(unsafe.Pointer(&sel), pc)
}

// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
func selectgoImpl(sel *hselect) (uintptr, uint16) {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}

	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	// (Fisher-Yates shuffle of the case indices; the random poll order is
	// what makes select choose uniformly among ready cases.)
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 0; i < int(sel.ncase); i++ {
		pollorder[i] = uint16(i)
	}
	for i := 1; i < int(sel.ncase); i++ {
		o := pollorder[i]
		j := int(fastrand1()) % (i + 1)
		pollorder[i] = pollorder[j]
		pollorder[j] = o
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// Phase 1: sift each channel up into a max-heap keyed by sortkey.
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		c := scases[j].c
		for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = c
	}
	// Phase 2: repeatedly move the max to the end and sift down.
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		c := lockorder[i]
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() {
				k++
			}
			if c.sortkey() < lockorder[k].sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = c
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(sel)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		futile byte
	)

loop:
	// pass 1 - look for something already waiting
	var dfl *scase
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas.c

		switch cas.kind {
		case caseRecv:
			if c.dataqsiz > 0 {
				if c.qcount > 0 {
					goto asyncrecv
				}
			} else {
				sg = c.sendq.dequeue()
				if sg != nil {
					goto syncrecv
				}
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			if c.dataqsiz > 0 {
				if c.qcount < c.dataqsiz {
					goto asyncsend
				}
			} else {
				sg = c.recvq.dequeue()
				if sg != nil {
					goto syncsend
				}
			}

		case caseDefault:
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(sel)
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
		// All sudogs share one `done` word so only one case can win the CAS
		// race when several channels become ready at once.
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.waitlink = gp.waiting
		gp.waiting = sg

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, unsafe.Pointer(sel), "select", traceEvGoBlockSelect|futile, 2)

	// someone woke us up
	sellock(sel)
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in case order, so when
	// iterating through the linked list they are in reverse order.
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
	}
	gp.waiting = nil
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		k = &scases[pollorder[i]]
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// Woken without a winning case (futile wakeup): retry from pass 1.
		futile = traceFutileWakeup
		goto loop
	}

	c = cas.c

	// A parked select can only be completed via the sync (unbuffered)
	// paths; buffered channels are handled in pass 1.
	if c.dataqsiz > 0 {
		throw("selectgo: shouldn't happen")
	}

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}

	selunlock(sel)
	goto retc

asyncrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, chanbuf(c, c.recvx))
	}
	// Zero the vacated slot so the GC does not see a stale element.
	memclr(chanbuf(c, c.recvx), uintptr(c.elemsize))
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	// A sender may be blocked on the (previously full) buffer: wake it.
	sg = c.sendq.dequeue()
	if sg != nil {
		gp = sg.g
		selunlock(sel)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		goready(gp, 3)
	} else {
		selunlock(sel)
	}
	goto retc

asyncsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	// A receiver may be blocked on the (previously empty) buffer: wake it.
	sg = c.recvq.dequeue()
	if sg != nil {
		gp = sg.g
		selunlock(sel)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		goready(gp, 3)
	} else {
		selunlock(sel)
	}
	goto retc

syncrecv:
	// can receive from sleeping sender (sg)
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		racesync(c, sg)
	}
	selunlock(sel)
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, sg.elem)
	}
	sg.elem = nil
	gp = sg.g
	gp.param = unsafe.Pointer(sg) // tells the woken G which sudog completed
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 3)
	goto retc

rclose:
	// read at end of closed channel
	selunlock(sel)
	if cas.receivedp != nil {
		*cas.receivedp = false // ok == false: channel closed and drained
	}
	if cas.elem != nil {
		// Receive yields the zero value of the element type.
		memclr(cas.elem, uintptr(c.elemsize))
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

syncsend:
	// can send to sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		racesync(c, sg)
	}
	selunlock(sel)
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	if sg.elem != nil {
		typedmemmove(c.elemtype, sg.elem, cas.elem)
	}
	sg.elem = nil
	gp = sg.g
	gp.param = unsafe.Pointer(sg) // tells the woken G which sudog completed
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 3)

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 2)
	}
	return cas.pc, cas.so

sclose:
	// send on closed channel
	selunlock(sel)
	panic("send on closed channel")
}

func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

// reflect_rselect implements reflect.Select: it builds an hselect from
// the reflect-provided cases and runs selectgoImpl. The case index is
// smuggled through the pc field (uintptr(i) instead of a real return pc)
// and so==0 because there is no caller stack slot to set.
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*hselect)(mallocgc(size, nil, flagNoScan))
	newselect(sel, int64(size), int32(len(cases)))
	r := new(bool)
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefaultImpl(sel, uintptr(i), 0)
		case selectSend:
			if rc.ch == nil {
				break // nil channel: case never fires
			}
			selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
		case selectRecv:
			if rc.ch == nil {
				break // nil channel: case never fires
			}
			selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
		}
	}

	pc, _ := selectgoImpl(sel)
	chosen = int(pc) // pc holds the case index here, not a real pc
	recvOK = *r
	return
}

// dequeueSudoG removes sgp from the doubly-linked wait queue q,
// handling the middle, tail, head, and singleton/already-removed cases.
// Unlike dequeue, it can unlink from anywhere in the queue, which pass 3
// of selectgo needs when cleaning up the losing cases.
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}