// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty,
//  except for the case of an unbuffered channel with a single goroutine
//  blocked on it for both sending and receiving using a select statement,
//  in which case the length of c.sendq and c.recvq is limited only by the
//  size of the select statement.
//
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.

import (
	"runtime/internal/atomic"
	"unsafe"
)

const (
	maxAlign = 8
	// hchanSize is sizeof(hchan) rounded up to a multiple of maxAlign,
	// so that the element buffer placed immediately after the header
	// in makechan is suitably aligned.
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)

// hchan is the runtime representation of a channel.
type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16         // size in bytes of one element (copied from elemtype.size in makechan)
	closed   uint32         // non-zero once closechan has run on this channel
	elemtype *_type         // element type
	sendx    uint           // send index
	recvx    uint           // receive index
	recvq    waitq          // list of recv waiters
	sendq    waitq          // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}

// waitq is a doubly-linked list of sudogs representing goroutines
// blocked on a channel operation.
type waitq struct {
	first *sudog
	last  *sudog
}

// reflect_makechan is linknamed from package reflect (reflect.MakeChan).
//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}

// makechan64 is like makechan but takes a 64-bit size; it panics if the
// size does not fit in an int on this platform.
func makechan64(t *chantype, size int64) *hchan {
	if int64(int(size)) != size {
		panic(plainError("makechan: size out of range"))
	}

	return makechan(t, int(size))
}

// makechan allocates and initializes a new channel with element type
// t.elem and buffer capacity size. It panics on a negative or
// overflowing size and throws on element types the compiler should
// already have rejected.
func makechan(t *chantype, size int) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}
	// Reject sizes whose buffer allocation would overflow or exceed _MaxMem.
	if size < 0 || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/elem.size) {
		panic(plainError("makechan: size out of range"))
	}

	var c *hchan
	if elem.kind&kindNoPointers != 0 || size == 0 {
		// Allocate memory in one call.
		// Hchan does not contain pointers interesting for GC in this case:
		// buf points into the same allocation, elemtype is persistent.
		// SudoG's are referenced from their owning thread so they can't be collected.
		// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
		c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
		if size > 0 && elem.size != 0 {
			c.buf = add(unsafe.Pointer(c), hchanSize)
		} else {
			// race detector uses this location for synchronization
			// Also prevents us from pointing beyond the allocation (see issue 9401).
			c.buf = unsafe.Pointer(c)
		}
	} else {
		// Element type contains pointers: the buffer must be a separate,
		// GC-scannable allocation.
		c = new(hchan)
		c.buf = newarray(elem, int(size))
	}
	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
	}
	return c
}

// chanbuf(c, i) is pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, getcallerpc(unsafe.Pointer(&c)))
}

/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if c == nil {
		// Send on a nil channel: a non-blocking attempt fails immediately;
		// a blocking one parks forever.
		if !block {
			return false
		}
		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation.
	if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
		(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	// gp.param is set to the sudog by the receiver that completed our send
	// (see send/recv); it is left nil when we were woken by closechan.
	if gp.param == nil {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil
	releaseSudog(mysg)
	return true
}

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg) // non-nil param tells the receiver a sender completed the handoff
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}

// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

// sendDirect copies the value at src (on this goroutine's stack)
// into the blocked receiver's slot sg.elem, which may be on another
// goroutine's stack.
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

// recvDirect copies the blocked sender's value sg.elem into dst,
// which is on this goroutine's stack or in the heap.
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

// closechan implements the close built-in for channel c.
// It panics if c is nil or already closed, then wakes every goroutine
// blocked on c: receivers observe a zeroed element, senders panic.
func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&c))
		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
		racerelease(unsafe.Pointer(c))
	}

	c.closed = 1

	// Collect the woken Gs on a local list first; they are made runnable
	// only after c.lock is dropped (readying a G while holding the channel
	// lock can deadlock with stack shrinking — see the lock comment on hchan).
	var glist *g

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			// Receivers on a closed channel get the zero value.
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil // nil param signals "woken by close" to the parked goroutine
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for glist != nil {
		gp := glist
		glist = glist.schedlink.ptr()
		gp.schedlink = 0
		goready(gp, 3)
	}
}

// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}

//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		// Receive from a nil channel: non-blocking fails immediately;
		// blocking parks forever.
		if !block {
			return
		}
		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not ready for receiving, we observe that the
	// channel is not closed. Each of these observations is a single word-sized read
	// (first c.sendq.first or c.qcount, and second c.closed).
	// Because a channel cannot be reopened, the later observation of the channel
	// being not closed implies that it was also not closed at the moment of the
	// first observation. We behave as if we observed the channel at that moment
	// and report that the receive cannot proceed.
	//
	// The order of operations is important here: reversing the operations can lead to
	// incorrect behavior when racing with a close.
	if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
		c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
		atomic.Load(&c.closed) == 0 {
		return
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		// Closed and drained: report (true, false) with a zeroed element.
		if raceenabled {
			raceacquire(unsafe.Pointer(c))
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// Clear the vacated slot so it does not keep its old value live for GC.
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	// nil param means we were woken by closechan rather than by a sender.
	closed := gp.param == nil
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, !closed
}

// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg) // non-nil param tells the sender its send completed
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, getcallerpc(unsafe.Pointer(&c)))
}

// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(c, elem, false)
	return
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(c, elem, false)
	return
}

// reflect_chansend is linknamed from package reflect; nb selects
// non-blocking behavior, so block is its negation.
//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, getcallerpc(unsafe.Pointer(&c)))
}

// reflect_chanrecv is linknamed from package reflect.
//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(c, elem, !nb)
}

// reflect_chanlen implements reflect's channel len; a nil channel has length 0.
//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

// reflect_chancap implements reflect's channel cap; a nil channel has capacity 0.
//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}

// enqueue appends sgp to the tail of q.
func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

// dequeue removes and returns the sudog at the head of q, or nil if
// the queue is empty. Sudogs parked by a select whose wakeup has
// already been claimed by another case are skipped.
func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel locks. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect {
			if !atomic.Cas(&sgp.g.selectDone, 0, 1) {
				continue
			}
		}

		return sgp
	}
}

// racesync records race-detector synchronization between the two
// goroutines of an unbuffered channel operation, using the channel's
// buffer address (chanbuf(c, 0)) as the synchronization location.
func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}