github.com/hbdrawn/golang@v0.0.0-20141214014649-6b835209aba2/src/runtime/select.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import "unsafe"

const (
	debugSelect = false
)

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)

func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(_select{}) +
		(size-1)*unsafe.Sizeof(_select{}.scase[0]) +
		size*unsafe.Sizeof(*_select{}.lockorder) +
		size*unsafe.Sizeof(*_select{}.pollorder)
	return round(selsize, _Int64Align)
}

func newselect(sel *_select, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		gothrow("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(_select{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*_select{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}
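
// The _select block is one contiguous allocation: selectsize computes its
// total size and newselect derives the interior pointers from that same
// layout, so the two must stay in sync. As a sketch (field widths are
// illustrative, not exact):
//
//	_select header, whose last field is scase[0]
//	scase[1] ... scase[size-1]
//	lockorder[0] ... lockorder[size-1]   one *hchan per case
//	pollorder[0] ... pollorder[size-1]   one uint16 per case
//
// The whole block is rounded up to _Int64Align by selectsize.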

//go:nosplit
func selectsend(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// cut in half to give stack a chance to split
func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		gothrow("selectsend: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))

	cas.pc = pc
	cas._chan = c
	cas.so = uint16(so)
	cas.kind = _CaseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n")
	}
}

//go:nosplit
func selectrecv(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

//go:nosplit
func selectrecv2(sel *_select, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		gothrow("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas._chan = c
	cas.so = uint16(so)
	cas.kind = _CaseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n")
	}
}

//go:nosplit
func selectdefault(sel *_select) (selected bool) {
	selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	return
}

func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		gothrow("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = callerpc
	cas._chan = nil
	cas.so = uint16(so)
	cas.kind = _CaseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
	}
}
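
// For orientation: the compiler lowers a source-level select into calls to
// the functions above plus a final selectgo. Roughly (this is a sketch of
// the generated shape, not the exact compiler output), a statement like
//
//	select {
//	case c1 <- v:
//		// send body
//	case x = <-c2:
//		// recv body
//	default:
//		// default body
//	}
//
// becomes a stack-allocated _select of selectsize(3) bytes and
//
//	newselect(sel, selectsize(3), 3)
//	if selectsend(sel, c1, &v) { /* send body */ goto done }
//	if selectrecv(sel, c2, &x) { /* recv body */ goto done }
//	if selectdefault(sel) { /* default body */ goto done }
//	selectgo(sel)
//	done:
//
// None of the if conditions is true during this registration pass; selectgo
// picks a case, stores true into that case's result slot (via the recorded
// so offset) and rewrites its caller's return PC to the pc recorded for the
// case, so execution resumes as if the chosen select* call had just
// returned true. The x, ok := <-c form uses selectrecv2 so ok can be filled
// in through receivedp.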

func sellock(sel *_select) {
	lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	var c *hchan
	for _, c0 := range lockorder {
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(sel *_select) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	n := int(sel.ncase)
	r := 0
	lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), n, n}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// skip the default case
	if n > 0 && lockorder[0] == nil {
		r = 1
	}
	for i := n - 1; i >= r; i-- {
		c := lockorder[i]
		if i > 0 && c == lockorder[i-1] {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

func selparkcommit(gp *g, sel unsafe.Pointer) bool {
	selunlock((*_select)(sel))
	return true
}

func block() {
	gopark(nil, nil, "select (no cases)") // forever
}

// overwrites return pc on stack to signal which case of the select
// to run, so cannot appear at the top of a split stack.
//go:nosplit
func selectgo(sel *_select) {
	pc, offset := selectgoImpl(sel)
	*(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
	setcallerpc(unsafe.Pointer(&sel), pc)
}
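
// Overview of selectgoImpl below: all channels involved are locked in
// lockorder (by channel address, so concurrent selects cannot deadlock) and
// the cases are visited in the random pollorder for fairness. The work is
// split into three passes:
//
//	pass 1: poll every case; if some channel is ready or closed, or a
//	        default case exists and nothing is ready, finish immediately.
//	pass 2: otherwise enqueue one sudog per case on the corresponding
//	        channel's wait queue, all sharing a single selectdone word,
//	        and park the goroutine.
//	pass 3: after wakeup, relock, dequeue the sudogs from the channels
//	        that did not fire, and report the winning case.
//
// If pass 3 finds that no case actually fired, the whole sequence starts
// over at the loop label.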

// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
func selectgoImpl(sel *_select) (uintptr, uint16) {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}

	scaseslice := sliceStruct{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	pollslice := sliceStruct{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 0; i < int(sel.ncase); i++ {
		pollorder[i] = uint16(i)
	}
	for i := 1; i < int(sel.ncase); i++ {
		o := pollorder[i]
		j := int(fastrand1()) % (i + 1)
		pollorder[i] = pollorder[j]
		pollorder[j] = o
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		c := scases[j]._chan
		for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = c
	}
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		c := lockorder[i]
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() {
				k++
			}
			if c.sortkey() < lockorder[k].sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = c
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				gothrow("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(sel)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
	)

loop:
	// pass 1 - look for something already waiting
	var dfl *scase
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas._chan

		switch cas.kind {
		case _CaseRecv:
			if c.dataqsiz > 0 {
				if c.qcount > 0 {
					goto asyncrecv
				}
			} else {
				sg = c.sendq.dequeue()
				if sg != nil {
					goto syncrecv
				}
			}
			if c.closed != 0 {
				goto rclose
			}

		case _CaseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			if c.dataqsiz > 0 {
				if c.qcount < c.dataqsiz {
					goto asyncsend
				}
			} else {
				sg = c.recvq.dequeue()
				if sg != nil {
					goto syncsend
				}
			}

		case _CaseDefault:
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(sel)
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas._chan
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack.c:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.waitlink = gp.waiting
		gp.waiting = sg

		switch cas.kind {
		case _CaseRecv:
			c.recvq.enqueue(sg)

		case _CaseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, unsafe.Pointer(sel), "select")

	// someone woke us up
	sellock(sel)
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in case order, so when
	// iterating through the linked list they are in reverse order.
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
	}
	gp.waiting = nil
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		k = &scases[pollorder[i]]
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			cas = k
		} else {
			c = k._chan
			if k.kind == _CaseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		goto loop
	}

	c = cas._chan

	if c.dataqsiz > 0 {
		gothrow("selectgo: shouldn't happen")
	}

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == _CaseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == _CaseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == _CaseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}

	selunlock(sel)
	goto retc

asyncrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	if cas.elem != nil {
		memmove(cas.elem, chanbuf(c, c.recvx), uintptr(c.elemsize))
	}
	memclr(chanbuf(c, c.recvx), uintptr(c.elemsize))
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	sg = c.sendq.dequeue()
	if sg != nil {
		gp = sg.g
		selunlock(sel)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		goready(gp)
	} else {
		selunlock(sel)
	}
	goto retc

asyncsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	memmove(chanbuf(c, c.sendx), cas.elem, uintptr(c.elemsize))
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	sg = c.recvq.dequeue()
	if sg != nil {
		gp = sg.g
		selunlock(sel)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		goready(gp)
	} else {
		selunlock(sel)
	}
	goto retc

syncrecv:
	// can receive from sleeping sender (sg)
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		racesync(c, sg)
	}
	selunlock(sel)
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	if cas.elem != nil {
		memmove(cas.elem, sg.elem, uintptr(c.elemsize))
	}
	sg.elem = nil
	gp = sg.g
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp)
	goto retc

rclose:
	// read at end of closed channel
	selunlock(sel)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		memclr(cas.elem, uintptr(c.elemsize))
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

syncsend:
	// can send to sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		racesync(c, sg)
	}
	selunlock(sel)
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	if sg.elem != nil {
		memmove(sg.elem, cas.elem, uintptr(c.elemsize))
	}
	sg.elem = nil
	gp = sg.g
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp)

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 2)
	}
	return cas.pc, cas.so

sclose:
	// send on closed channel
	selunlock(sel)
	panic("send on closed channel")
}

func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)
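
// reflect_rselect backs reflect.Select. For reference (a caller-side sketch
// only; the exact plumbing is in reflect/value.go), a call such as
//
//	cases := []reflect.SelectCase{
//		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch1)},
//		{Dir: reflect.SelectSend, Chan: reflect.ValueOf(ch2), Send: reflect.ValueOf(v)},
//		{Dir: reflect.SelectDefault},
//	}
//	chosen, recv, recvOK := reflect.Select(cases)
//
// arrives here as one runtimeSelect per case. Because this path records
// uintptr(i) in cas.pc instead of a real return PC, the pc returned by
// selectgoImpl is simply the index of the chosen case.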
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*_select)(mallocgc(size, nil, flagNoScan))
	newselect(sel, int64(size), int32(len(cases)))
	r := new(bool)
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefaultImpl(sel, uintptr(i), 0)
		case selectSend:
			if rc.ch == nil {
				break
			}
			selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
		case selectRecv:
			if rc.ch == nil {
				break
			}
			selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
		}
	}

	pc, _ := selectgoImpl(sel)
	chosen = int(pc)
	recvOK = *r
	return
}

func (q *waitq) dequeueSudoG(s *sudog) {
	var prevsgp *sudog
	l := &q.first
	for {
		sgp := *l
		if sgp == nil {
			return
		}
		if sgp == s {
			*l = sgp.next
			if q.last == sgp {
				q.last = prevsgp
			}
			s.next = nil
			return
		}
		l = &sgp.next
		prevsgp = sgp
	}
}