// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugSelect = false

	// scase.kind
	// NOTE: iota counts every ConstSpec in this block, including
	// debugSelect above, so caseRecv is 1 (not 0), caseSend is 2,
	// caseDefault is 3. The values are only ever compared against
	// these names, so the offset is harmless.
	caseRecv = iota
	caseSend
	caseDefault
)

// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
//
// The header is followed in memory by tcase scase descriptors, then the
// lockorder array, then the pollorder array (see selectsize/newselect);
// pollorder and lockorder point into that single allocation.
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder *uint16  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc
	kind        uint16         // caseRecv, caseSend, or caseDefault
	so          uint16         // vararg of selected bool
	receivedp   *bool          // pointer to received bool (recv2)
	releasetime int64
}

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)

// selectsize returns the number of bytes needed for a select with size
// cases: the hselect header (which already embeds one scase), size-1
// additional scases, then the lockorder and pollorder arrays, rounded
// up to int64 alignment.
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, sys.Int64Align)
}

// newselect initializes a select header for size cases. selsize must
// equal selectsize(size); the lockorder and pollorder pointers are
// carved out of the tail of the same allocation, after the scase array.
func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}

// selectsend registers a send case on c. The compiler reserves a result
// slot on the caller's frame; its offset from &sel is recorded (via so)
// so selectgo can later set it to true if this case fires.
//go:nosplit
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// cut in half to give stack a chance to split
func selectsendImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectsend: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))

	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

// selectrecv registers a receive case on c (one-result form, <-c).
// See selectsend for the meaning of the so offset.
//go:nosplit
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// selectrecv2 registers a receive case on c (two-result form,
// v, ok := <-c); received points at the caller's ok variable.
//go:nosplit
func selectrecv2(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

func selectrecvImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

// selectdefault registers the default case (no channel).
//go:nosplit
func selectdefault(sel *hselect) (selected bool) {
	selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	return
}

func selectdefaultImpl(sel *hselect, callerpc uintptr, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = callerpc
	cas.c = nil
	cas.so = uint16(so)
	cas.kind = caseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
	}
}
// sellock locks every channel involved in the select, in lockorder.
// Because lockorder sorts cases by channel address, duplicate channels
// are adjacent; comparing against the previously locked channel (c)
// skips them so each lock is taken exactly once.
func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

// selunlock unlocks the select's channels in reverse lock order,
// again skipping duplicates (and the nil channel of a default case,
// which sorts first).
func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	n := len(scases)
	r := 0
	// skip the default case
	if n > 0 && scases[lockorder[0]].c == nil {
		r = 1
	}
	for i := n - 1; i >= r; i-- {
		c := scases[lockorder[i]].c
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

// selparkcommit is the gopark unlock function for select: it releases
// all channel locks once the goroutine is committed to sleeping, and
// returns true so the park proceeds.
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}
// block parks the goroutine forever; used for a select with no cases.
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}

// selectgo implements the select statement.
//
// *sel is on the current goroutine's stack (regardless of any
// escaping in selectgo).
//
// selectgo does not return. Instead, it overwrites its return PC and
// returns directly to the triggered select case. Because of this, it
// cannot appear at the top of a split stack.
//
//go:nosplit
func selectgo(sel *hselect) {
	pc, offset := selectgoImpl(sel)
	// Set the caller's "selected" bool for the winning case (offset is
	// relative to the sel argument slot), then jump to that case's PC.
	*(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
	setcallerpc(unsafe.Pointer(&sel), pc)
}

// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
//
// It proceeds in three passes: poll all cases in random order for one
// that is immediately ready; otherwise enqueue a sudog on every channel
// and park; on wakeup, dequeue from the channels that did not fire.
func selectgoImpl(sel *hselect) (uintptr, uint16) {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}

	// View the trailing scase array as a slice.
	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 1; i < int(sel.ncase); i++ {
		j := int(fastrand1()) % (i + 1)
		pollorder[i] = pollorder[j]
		pollorder[j] = uint16(i)
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
	// Pop-max phase of the heap sort: repeatedly move the heap root to
	// the end and sift the displaced element down.
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

loop:
	// pass 1 - look for something already waiting
	var dfl *scase
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas.c

		switch cas.kind {
		case caseRecv:
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}

		case caseDefault:
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(scases, lockorder)
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		cas = &scases[casei]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 2)

	// someone woke us up
	sellock(scases, lockorder)
	sg = (*sudog)(gp.param) // the sudog that fired, or nil (e.g. close)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// This can happen if we were woken up by a close().
		// TODO: figure that out explicitly so we don't need this loop.
		goto loop
	}

	c = cas.c

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}
	if msanenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.size)
		} else if cas.kind == caseSend {
			msanread(cas.elem, c.elemtype.size)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.size)
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	// Clear the vacated slot so the GC doesn't see a stale element.
	memclr(qp, uintptr(c.elemsize))
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		// Receiving from a closed channel yields the zero value.
		memclr(cas.elem, uintptr(c.elemsize))
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	goto retc

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 2)
	}
	return cas.pc, cas.so

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

// sortkey returns the channel's address for use as the lock-ordering key.
func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

// reflect_rselect implements reflect.Select: it builds an hselect on the
// heap from the supplied cases and runs selectgoImpl directly. Each case's
// "pc" is its index i, so the returned pc is the chosen case index.
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*hselect)(mallocgc(size, nil, true))
	newselect(sel, int64(size), int32(len(cases)))
	r := new(bool) // shared "received" flag for all recv cases
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefaultImpl(sel, uintptr(i), 0)
		case selectSend:
			if rc.ch == nil {
				break // nil channel: case never fires
			}
			selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
		case selectRecv:
			if rc.ch == nil {
				break // nil channel: case never fires
			}
			selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
		}
	}

	pc, _ := selectgoImpl(sel)
	chosen = int(pc)
	recvOK = *r
	return
}

// dequeueSudoG removes sgp from q, wherever it is in the queue (or does
// nothing if it has already been removed). The caller must hold the
// channel lock guarding q.
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}