github.com/epfl-dcsl/gotee@v0.0.0-20200909122901-014b35f5e5e9/src/runtime/chan.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty,
//  except for the case of an unbuffered channel with a single goroutine
//  blocked on it for both sending and receiving using a select statement,
//  in which case the length of c.sendq and c.recvq is limited only by the
//  size of the select statement.
//
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.

import (
	"runtime/internal/atomic"
	"unsafe"
)

const (
	maxAlign  = 8
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)

type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16
	closed   uint32
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex

	isencl  bool
	encltpe *_type // pointer to the enclave type
}

type waitq struct {
	first *sudog
	last  *sudog
}

//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}

func makechan64(t *chantype, size int64) *hchan {
	if int64(int(size)) != size {
		panic(plainError("makechan: size out of range"))
	}

	return makechan(t, int(size))
}

func makechan(t *chantype, size int) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}

	if size < 0 || uintptr(size) > maxSliceCap(elem.size) || uintptr(size)*elem.size > _MaxMem-hchanSize {
		panic(plainError("makechan: size out of range"))
	}

	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
	// buf points into the same allocation, elemtype is persistent.
	// SudoG's are referenced from their owning thread so they can't be collected.
	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
	var c *hchan
	switch {
	case size == 0 || elem.size == 0:
		// Queue or element size is zero.
		c = (*hchan)(mallocgc(hchanSize, nil, true))
		// Race detector uses this location for synchronization.
		c.buf = unsafe.Pointer(c)
	case elem.kind&kindNoPointers != 0:
		// Elements do not contain pointers.
		// Allocate hchan and buf in one call.
		c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
		c.buf = add(unsafe.Pointer(c), hchanSize)
	default:
		// Elements contain pointers.
		c = new(hchan)
		c.buf = mallocgc(uintptr(size)*elem.size, elem, true)
	}

	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
	}
	c.isencl = isEnclave
	return c
}
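// Illustrative sketch (editor's addition, not part of the runtime): how the
// three allocation cases in makechan above play out for ordinary user code.
// The element type decides whether buf shares the hchan allocation.
//
//	make(chan struct{})  // zero-size element: buf aliases the hchan itself
//	make(chan int, 4)    // no pointers: hchan and buf in one mallocgc call
//	make(chan *int, 4)   // pointers: hchan via new, buf allocated separately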
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, getcallerpc())
}
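// Illustrative sketch (editor's addition, not part of the runtime): a send
// guarded by a default case is compiled to selectnbsend (further below),
// which calls chansend with block == false, so a full buffer or an empty
// receiver queue is detected on the lock-free fast path instead of parking
// the goroutine.
//
//	c := make(chan int, 1)
//	c <- 1
//	select {
//	case c <- 2:
//		// not taken: buffer is full
//	default:
//		// taken immediately, without blocking
//	}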
/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation.
	if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
		(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()

	// @aghosn special case when inter-domain communication.
	var mysg *sudog = nil
	if checkinterdomain(gp.isencl, c.isencl) {
		// Blocking on a send from enclave.
		// take a sudog from the free list and use it.
		if c.encltpe == nil {
			mysg, ep = UnsafeAllocator.AcquireUnsafeSudog(ep, false, c.elemsize, c.elemtype)
		} else {
			mysg, ep = UnsafeAllocator.AcquireUnsafeSudogSend(ep, c.elemsize, c.encltpe)
		}
		if !gp.isencl || !isEnclave {
			panic("Acquiring sudog from the pool in wrong environment.")
		}
		if c.elemsize > SG_BUF_SIZE {
			panic("Not enough space in the sudog buffer.")
		}
	} else {
		mysg = acquireSudog()
	}

	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}

	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	gp.param = unsafe.Pointer(mysg)
	mysg.isSelect = false
	mysg.c = c
	gp.waiting = mysg
	//gp.param = nil
	c.sendq.enqueue(mysg)
	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if gp.param == nil {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil

	crossReleaseSudog(mysg, c.elemsize)
	return true
}

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	if sg.elem != nil {
		if !sendCopy(sg, ep, c) {
			sendDirect(c.elemtype, sg, ep)
		}
		sg.elem = nil
	}

	if !isReschedulable(sg) {
		unlockf()
		if !isEnclave && sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		Cooprt.crossGoready(sg)
		return
	}

	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	if !isEnclave && sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
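// Illustrative sketch (editor's addition, not part of the runtime): the
// direct handoff performed by send above. With no buffer, the value moves
// straight between the two goroutines' stacks (sendDirect/recvDirect below)
// and never touches a channel buffer.
//
//	c := make(chan string) // unbuffered
//	go func() { c <- "hello" }()
//	msg := <-c // "hello", copied directly from the sender's stack slot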
// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	checkEnclaveBounds(uintptr(dst))
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	checkEnclaveBounds(uintptr(src))
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
		racerelease(unsafe.Pointer(c))
	}

	c.closed = 1

	var glist *g

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for glist != nil {
		gp := glist
		glist = glist.schedlink.ptr()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
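// Illustrative sketch (editor's addition, not part of the runtime): what
// closechan above means for blocked goroutines. Pending receivers are woken
// with the zero value and ok == false; pending and future senders panic.
//
//	c := make(chan int)
//	close(c)
//	v, ok := <-c // v == 0, ok == false
//	c <- 1       // panics: send on closed channel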
// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}

//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not ready for receiving, we observe that the
	// channel is not closed. Each of these observations is a single word-sized read
	// (first c.sendq.first or c.qcount, and second c.closed).
	// Because a channel cannot be reopened, the later observation of the channel
	// being not closed implies that it was also not closed at the moment of the
	// first observation. We behave as if we observed the channel at that moment
	// and report that the receive cannot proceed.
	//
	// The order of operations is important here: reversing the operations can lead to
	// incorrect behavior when racing with a close.
	if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
		c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
		atomic.Load(&c.closed) == 0 {
		return
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(unsafe.Pointer(c))
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()

	// @aghosn for inter-domain communication.
	var mysg *sudog = nil
	if checkinterdomain(gp.isencl, c.isencl) {
		mysg, ep = UnsafeAllocator.AcquireUnsafeSudog(ep, true, c.elemsize, c.elemtype)
	} else {
		mysg = acquireSudog()
	}

	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}

	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	// perform a deep copy
	if mysg.needcpy {
		doCopy(mysg, ep, c)
	}

	closed := gp.param == nil
	gp.param = nil
	mysg.c = nil

	crossReleaseSudog(mysg, c.elemsize)
	return true, !closed
}
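// Illustrative sketch (editor's addition, not part of the runtime): the case
// recv handles below. When the buffer is full and a sender is parked, a
// receive takes the value at the head of the queue and the parked sender's
// value is placed in the slot just freed at the tail, preserving FIFO order.
//
//	c := make(chan int, 1)
//	c <- 1
//	go func() { c <- 2 }() // may park: buffer is full
//	first := <-c           // 1, from the buffer head
//	second := <-c          // 2, the blocked sender's value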
// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	// Perform a deep copy if needed.
	doCopy(sg, ep, c)

	sg.elem = nil
	if !isReschedulable(sg) {
		//TODO @aghosn don't know what to do with gp.param
		unlockf()
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		Cooprt.crossGoready(sg)
		return
	}

	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, getcallerpc())
}

// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(c, elem, false)
	return
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(c, elem, false)
	return
}
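// Illustrative sketch (editor's addition, not part of the runtime, assumes
// package reflect is imported by the caller): the reflect entry points below
// are reached through package reflect; for instance a non-blocking TrySend
// ends up in chansend with block == false via reflect_chansend.
//
//	c := make(chan int, 1)
//	v := reflect.ValueOf(c)
//	ok := v.TrySend(reflect.ValueOf(42)) // true: buffer had room
//	x, _ := v.TryRecv()                  // x.Int() == 42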
//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, getcallerpc())
}

//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(c, elem, !nb)
}

//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}

func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel locks. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect {
			if !atomic.Cas(&sgp.g.selectDone, 0, 1) {
				continue
			}
		}

		return sgp
	}
}

func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}