github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/runtime/chan.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Channel definition and implementation. Sending and receiving
     6  // goroutines wait in FIFO queues: the goroutine enqueued first is served first.
     7  
     8  package runtime
     9  
    10  // This file contains the implementation of Go channels.
    11  
    12  // Invariants:
    13  //  At least one of c.sendq and c.recvq is empty,
    14  //  except for the case of an unbuffered channel with a single goroutine
    15  //  blocked on it for both sending and receiving using a select statement,
    16  //  in which case the length of c.sendq and c.recvq is limited only by the
    17  //  size of the select statement.
    18  //
    19  // For buffered channels, also:
    20  //  c.qcount > 0 implies that c.recvq is empty.
    21  //  c.qcount < c.dataqsiz implies that c.sendq is empty.
    22  
    23  import (
    24  	"runtime/internal/atomic"
    25  	"unsafe"
    26  )
    27  
const (
	maxAlign  = 8 // maximum alignment required by any channel element type
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1)) // sizeof(hchan) rounded up to a multiple of maxAlign, so a buffer placed right after the header stays aligned
	debugChan = false // enables debug prints in channel operations
)
    33  
// hchan is the runtime representation of a channel.
// Field layout matters: buf may point into the same allocation
// (see makechan), so do not reorder fields.
type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16         // size in bytes of one buffered element
	closed   uint32         // non-zero once close(c) has run
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}
    54  
// waitq is a doubly linked FIFO list of sudogs — goroutines parked on
// a channel send or receive. enqueue appends at last; dequeue pops
// from first, so earlier waiters are served first.
type waitq struct {
	first *sudog
	last  *sudog
}
    59  
// reflect_makechan is the implementation of reflect.makechan,
// linked into package reflect via go:linkname.
//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}
    64  
    65  // size的长度如果超出int最大大小就出错
    66  func makechan64(t *chantype, size int64) *hchan {
    67  	if int64(int(size)) != size {
    68  		panic(plainError("makechan: size out of range"))
    69  	}
    70  
    71  	return makechan(t, int(size))
    72  }
    73  
// makechan allocates and initializes a channel with element type
// t.elem and a buffer capacity of size elements. It panics if the
// requested buffer is negative or too large for the address space.
func makechan(t *chantype, size int) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}

	// Reject negative sizes and buffers whose total byte size would
	// overflow or exceed the maximum allocation.
	if size < 0 || uintptr(size) > maxSliceCap(elem.size) || uintptr(size)*elem.size > _MaxMem-hchanSize {
		panic(plainError("makechan: size out of range"))
	}

	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
	// buf points into the same allocation, elemtype is persistent.
	// SudoG's are referenced from their owning thread so they can't be collected.
	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
	var c *hchan
	switch {
	case size == 0 || elem.size == 0:
		// Queue or element size is zero: no buffer storage is needed.
		c = (*hchan)(mallocgc(hchanSize, nil, true))
		// Race detector uses this location for synchronization.
		c.buf = unsafe.Pointer(c)
	case elem.kind&kindNoPointers != 0:
		// Elements do not contain pointers.
		// Allocate hchan and buf in one call.
		c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
		c.buf = add(unsafe.Pointer(c), hchanSize)
	default:
		// Elements contain pointers: allocate the buffer separately with
		// elem type info so the GC scans it.
		c = new(hchan)
		c.buf = mallocgc(uintptr(size)*elem.size, elem, true)
	}

	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
	}
	return c
}
   120  
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
// The caller is responsible for ensuring i < c.dataqsiz.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
   125  
// entry point for c <- x from compiled code.
// Always blocking; nosplit so getcallerpc reports the user call site.
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, getcallerpc())
}
   131  
   132  /*
   133   * generic single channel send/recv
   134   * If block is not nil,
   135   * then the protocol will not
   136   * sleep but return if it could
   137   * not complete.
   138   *
   139   * sleep can wake up with g.param == nil
   140   * when a channel involved in the sleep has
   141   * been closed.  it is easiest to loop and re-run
   142   * the operation; we'll see that it's now closed.
   143   */
   144  func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
   145  	if c == nil {
   146  		if !block {
   147  			return false
   148  		}
   149  		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
   150  		throw("unreachable")
   151  	}
   152  
   153  	if debugChan {
   154  		print("chansend: chan=", c, "\n")
   155  	}
   156  
   157  	if raceenabled {
   158  		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
   159  	}
   160  
   161  	// Fast path: check for failed non-blocking operation without acquiring the lock.
   162  	//
   163  	// After observing that the channel is not closed, we observe that the channel is
   164  	// not ready for sending. Each of these observations is a single word-sized read
   165  	// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
   166  	// Because a closed channel cannot transition from 'ready for sending' to
   167  	// 'not ready for sending', even if the channel is closed between the two observations,
   168  	// they imply a moment between the two when the channel was both not yet closed
   169  	// and not ready for sending. We behave as if we observed the channel at that moment,
   170  	// and report that the send cannot proceed.
   171  	//
   172  	// It is okay if the reads are reordered here: if we observe that the channel is not
   173  	// ready for sending and then observe that it is not closed, that implies that the
   174  	// channel wasn't closed during the first observation.
   175  	if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
   176  		(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
   177  		return false
   178  	}
   179  
   180  	var t0 int64
   181  	if blockprofilerate > 0 {
   182  		t0 = cputicks()
   183  	}
   184  
   185  	lock(&c.lock)
   186  
   187  	if c.closed != 0 {
   188  		unlock(&c.lock)
   189  		panic(plainError("send on closed channel"))
   190  	}
   191  
   192  	if sg := c.recvq.dequeue(); sg != nil {
   193  		// Found a waiting receiver. We pass the value we want to send
   194  		// directly to the receiver, bypassing the channel buffer (if any).
   195  		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
   196  		return true
   197  	}
   198  
   199  	if c.qcount < c.dataqsiz {
   200  		// Space is available in the channel buffer. Enqueue the element to send.
   201  		qp := chanbuf(c, c.sendx)
   202  		if raceenabled {
   203  			raceacquire(qp)
   204  			racerelease(qp)
   205  		}
   206  		typedmemmove(c.elemtype, qp, ep)
   207  		c.sendx++
   208  		if c.sendx == c.dataqsiz {
   209  			c.sendx = 0
   210  		}
   211  		c.qcount++
   212  		unlock(&c.lock)
   213  		return true
   214  	}
   215  
   216  	if !block {
   217  		unlock(&c.lock)
   218  		return false
   219  	}
   220  
   221  	// Block on the channel. Some receiver will complete our operation for us.
   222  	gp := getg()
   223  	mysg := acquireSudog()
   224  	mysg.releasetime = 0
   225  	if t0 != 0 {
   226  		mysg.releasetime = -1
   227  	}
   228  	// No stack splits between assigning elem and enqueuing mysg
   229  	// on gp.waiting where copystack can find it.
   230  	mysg.elem = ep
   231  	mysg.waitlink = nil
   232  	mysg.g = gp
   233  	mysg.isSelect = false
   234  	mysg.c = c
   235  	gp.waiting = mysg
   236  	gp.param = nil
   237  	c.sendq.enqueue(mysg)
   238  	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3) // 这还有点像信号量, wait(unlock lock, wait for notify), notify(notify a waiting thread)
   239  
   240  	// someone woke us up.
   241  	if mysg != gp.waiting {
   242  		throw("G waiting list is corrupted")
   243  	}
   244  	gp.waiting = nil
   245  	if gp.param == nil {
   246  		if c.closed == 0 {
   247  			throw("chansend: spurious wakeup")
   248  		}
   249  		panic(plainError("send on closed channel"))
   250  	}
   251  	gp.param = nil
   252  	if mysg.releasetime > 0 {
   253  		blockevent(mysg.releasetime-t0, 2)
   254  	}
   255  	mysg.c = nil
   256  	releaseSudog(mysg)
   257  	return true
   258  }
   259  
// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked.  send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	// sg.elem == nil means the receiver discards the value (e.g. <-c
	// with no assignment); otherwise copy straight onto its stack.
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	// A non-nil param tells the woken receiver it got a value from a
	// sender rather than being woken by close.
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
   298  
// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

// sendDirect copies the value at src directly onto the receiver's
// stack slot recorded in sg.elem.
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}
   319  
// recvDirect copies the value the sender staged in sg.elem (on the
// sender's stack) into dst, with the write barrier sendDirect's
// comment above explains.
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}
   328  
// closechan implements close(c). It panics on a nil or already-closed
// channel, marks the channel closed, then wakes every parked waiter:
// receivers will observe the zero value, blocked senders will panic.
func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
		racerelease(unsafe.Pointer(c))
	}

	c.closed = 1

	// glist collects the goroutines to ready only after c.lock is
	// dropped; readying a G while holding the lock can deadlock with
	// stack shrinking (see the lock comment on hchan).
	var glist *g

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			// Receiver gets the zero value of the element type.
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		// nil param signals the woken goroutine that the channel closed.
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for glist != nil {
		gp := glist
		glist = glist.schedlink.ptr()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
   400  
// entry points for <- c from compiled code.
// chanrecv1 handles a receive whose result (ok flag) is discarded.
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}
   406  
// chanrecv2 handles v, ok = <-c from compiled code, returning the ok flag.
//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}
   412  
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		// Receive on a nil channel: non-blocking fails, blocking parks forever.
		if !block {
			return
		}
		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not ready for receiving, we observe that the
	// channel is not closed. Each of these observations is a single word-sized read
	// (first c.sendq.first or c.qcount, and second c.closed).
	// Because a channel cannot be reopened, the later observation of the channel
	// being not closed implies that it was also not closed at the moment of the
	// first observation. We behave as if we observed the channel at that moment
	// and report that the receive cannot proceed.
	//
	// The order of operations is important here: reversing the operations can lead to
	// incorrect behavior when racing with a close.
	if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
		c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
		atomic.Load(&c.closed) == 0 {
		return
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		// Closed and drained: report (true, false) and zero *ep.
		if raceenabled {
			raceacquire(unsafe.Pointer(c))
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// Clear the slot so the GC doesn't retain the element.
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0 // wrap around the circular buffer
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1 // ask the waker to record a release timestamp
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	// A nil param means we were woken by close rather than by a sender.
	closed := gp.param == nil
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, !closed
}
   538  
// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	// A non-nil param tells the woken sender its send completed
	// (as opposed to being woken by close).
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
   594  
// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
// i.e. a non-blocking send that reports whether the value was sent.
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, getcallerpc())
}
   615  
// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
// i.e. a non-blocking receive that reports whether a value was read.
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(c, elem, false)
	return
}
   637  
// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
// i.e. a non-blocking receive that also stores the "channel open" flag
// through received.
func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(c, elem, false)
	return
}
   660  
// reflect_chansend is the implementation of reflect's channel send.
// nb means "non-blocking", hence the !nb when forwarding to chansend.
//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, getcallerpc())
}
   665  
   666  //go:linkname reflect_chanrecv reflect.chanrecv
   667  func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
   668  	return false, chanrecv(c, elem, !nb)
   669  }
   670  
   671  //go:linkname reflect_chanlen reflect.chanlen
   672  func reflect_chanlen(c *hchan) int {
   673  	if c == nil {
   674  		return 0
   675  	}
   676  	return int(c.qcount)
   677  }
   678  
   679  //go:linkname reflect_chancap reflect.chancap
   680  func reflect_chancap(c *hchan) int {
   681  	if c == nil {
   682  		return 0
   683  	}
   684  	return int(c.dataqsiz)
   685  }
   686  
// reflect_chanclose is the implementation of reflect's channel close;
// it shares all of close(c)'s panics (nil channel, double close).
//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}
   691  
   692  func (q *waitq) enqueue(sgp *sudog) {
   693  	sgp.next = nil
   694  	x := q.last
   695  	if x == nil {
   696  		sgp.prev = nil
   697  		q.first = sgp
   698  		q.last = sgp
   699  		return
   700  	}
   701  	sgp.prev = x
   702  	x.next = sgp
   703  	q.last = sgp
   704  }
   705  
// dequeue pops the sudog at the head of the wait queue, skipping
// entries that lost a select race (see below). Returns nil when the
// queue is empty. The caller must hold the channel lock.
func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			// sgp was the only element; queue becomes empty.
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel locks. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect {
			// Only the winner of the CAS may wake this goroutine; a
			// loser skips it and tries the next entry.
			if !atomic.Cas(&sgp.g.selectDone, 0, 1) {
				continue
			}
		}

		return sgp
	}
}
   739  
// racesync records a synchronization edge between the current
// goroutine and sg.g for the race detector on an unbuffered channel,
// using slot 0 of the (otherwise unused) buffer as the sync location.
func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}