github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/chan.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty,
//  except for the case of an unbuffered channel with a single goroutine
//  blocked on it for both sending and receiving using a select statement,
//  in which case the length of c.sendq and c.recvq is limited only by the
//  size of the select statement.
//
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.

import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	maxAlign  = 8
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)
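
// A worked example of the hchanSize expression above (a sketch; the size 90
// is illustrative, not the real unsafe.Sizeof(hchan{})): the term
// -int(s) & (maxAlign-1) computes how many bytes are needed to round s up to
// the next multiple of maxAlign, because in two's complement the low three
// bits of -s are exactly (8 - s%8) % 8:
//
//	s := 90             // suppose unsafe.Sizeof(hchan{}) were 90
//	pad := -s & (8 - 1) // -90 & 7 == 6
//	_ = s + pad         // 96, the next multiple of 8
//
// If s is already a multiple of 8, -s & 7 == 0 and no padding is added.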

type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16
	closed   uint32
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex

	///MYCODE:
	chInfo *ChanInfo

	///MYCODE:
	id         uint16      // an ID assigned to the channel when it is made; starts at 0 and increases by 1 for every new channel
	preLoc     uint16      // used when BoolRecordPerCh is true; stores the hash of the last operation on this channel
	chanRecord *ChanRecord // a data structure recording information about this channel
}
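
// For orientation (a sketch; ch and the values are illustrative): a
// user-level make(chan int, 4) produces an hchan with dataqsiz == 4,
// elemsize == 8, buf pointing at space for 4 elements, and qcount, sendx,
// recvx all zero. The buffer is used as a ring:
//
//	ch := make(chan int, 4)
//	ch <- 1 // stored at slot sendx (0); qcount 0->1, sendx 0->1
//	ch <- 2 // stored at slot 1; qcount 1->2, sendx 1->2
//	<-ch    // read from slot recvx (0); qcount 2->1, recvx 0->1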

type waitq struct {
	first *sudog
	last  *sudog
}

//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan {
	return makechan(t, size)
}

func makechan64(t *chantype, size int64) *hchan {
	if int64(int(size)) != size {
		panic(plainError("makechan: size out of range"))
	}

	return makechan(t, int(size))
}

func makechan(t *chantype, size int) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}

	mem, overflow := math.MulUintptr(elem.size, uintptr(size))
	if overflow || mem > maxAlloc-hchanSize || size < 0 {
		panic(plainError("makechan: size out of range"))
	}

	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers.
	// buf points into the same allocation, elemtype is persistent.
	// SudoG's are referenced from their owning thread so they can't be collected.
	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
	var c *hchan
	switch {
	case mem == 0:
		// Queue or element size is zero.
		c = (*hchan)(mallocgc(hchanSize, nil, true))
		// Race detector uses this location for synchronization.
		c.buf = c.raceaddr()
	case elem.ptrdata == 0:
		// Elements do not contain pointers.
		// Allocate hchan and buf in one call.
		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
		c.buf = add(unsafe.Pointer(c), hchanSize)
	default:
		// Elements contain pointers.
		c = new(hchan)
		c.buf = mallocgc(mem, elem, true)
	}

	///MYCODE
	c.chInfo = NewChanInfo(c)
	StoreLastPrimInfo(c.chInfo)

	///MYCODE
	if BoolRecord {
		RecordChMake(size, c)
	}

	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)
	lockInit(&c.lock, lockRankHchan)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
	}
	return c
}
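
// The three allocation cases above map onto ordinary user code as follows
// (a sketch; the element types are illustrative):
//
//	make(chan struct{}, 8) // mem == 0: no buffer allocated, buf aliases raceaddr
//	make(chan int, 8)      // ptrdata == 0: one allocation holds hchan and buffer
//	make(chan *int, 8)     // default: buffer allocated separately so GC scans it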

// chanbuf(c, i) is a pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}

// full reports whether a send on c would block (that is, the channel is full).
// It uses a single word-sized read of mutable state, so although
// the answer is instantaneously true, the correct answer may have changed
// by the time the calling function receives the return value.
func full(c *hchan) bool {
	// c.dataqsiz is immutable (never written after the channel is created)
	// so it is safe to read at any time during channel operation.
	if c.dataqsiz == 0 {
		// Assumes that a pointer read is relaxed-atomic.
		return c.recvq.first == nil
	}
	// Assumes that a uint read is relaxed-atomic.
	return c.qcount == c.dataqsiz
}
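
// Note that for an unbuffered channel "full" means "no receiver is waiting":
// a send can only proceed by handing the value to a parked receiver. The
// fast path of a non-blocking send compiled from a select with a default
// case relies on this check (a sketch; ch and v are illustrative):
//
//	select {
//	case ch <- v:
//		// full(ch) was false at some instant; the send completed
//	default:
//		// full(ch) reported the channel as full, so chansend gave up
//	}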

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
	chansend(c, elem, true, getcallerpc())
}

/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed.  it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, waitReasonChanSendNilChan, traceEvGoStop, 2)
		throw("unreachable")
	}

	///MYCODE
	if okToCheck(c) {
		blockEntry := EnqueueBlockEntry([]PrimInfo{c.chInfo}, Send)
		defer DequeueBlockEntry(blockEntry)
	}
	if c.chInfo != nil {
		Monitor(c.chInfo)
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second full()).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation. However, nothing here
	// guarantees forward progress. We rely on the side effects of lock release in
	// chanrecv() and closechan() to update this thread's view of c.closed and full().
	if !block && c.closed == 0 && full(c) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	///MYCODE
	if c.chInfo != nil && GlobalEnableOracle && c.chInfo.OKToCheck && okToCheck(c) {
		currentGo := CurrentGoInfo()
		AddRefGoroutine(c.chInfo, currentGo)
		currentGo.SetBlockAt(c.chInfo, Send)
		CS := []PrimInfo{c.chInfo}
		var checkEntry *CheckEntry
		if BoolDelayCheck {
			checkEntry = EnqueueCheckEntry(CS)
		} else {
			CheckBlockBug(CS)
		}
		defer currentGo.WithdrawBlock(checkEntry)
	}

	///MYCODE
	if BoolRecord {
		RecordChOp(c)
	}

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			racenotify(c, c.sendx, nil)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	atomic.Store8(&gp.parkingOnChan, 1)
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
	// Ensure the value being sent is kept alive until the
	// receiver copies it out. The sudog has a pointer to the
	// stack object, but sudogs aren't considered as roots of the
	// stack tracer.
	KeepAlive(ep)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	closed := !mysg.success
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil
	releaseSudog(mysg)
	if closed {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		///MYCODE
		ReportNonBlockingBug()

		panic(plainError("send on closed channel"))
	}
	return true
}
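
// The block parameter distinguishes the two user-level send forms
// (a sketch; ch and v are illustrative):
//
//	ch <- v       // chansend(ch, &v, true, pc): parks until the send completes
//
//	select {
//	case ch <- v: // chansend(ch, &v, false, pc): returns false instead of
//	default:      // parking, in which case this branch runs
//	}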

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked.  send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			racenotify(c, c.recvx, nil)
			racenotify(c, c.recvx, sg)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	sg.success = true
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
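
// This is the direct-handoff path: the receiver parked first, so the value
// skips the buffer entirely (a sketch; the goroutines are illustrative):
//
//	ch := make(chan int) // unbuffered
//	go func() { <-ch }() // receiver parks on ch.recvq
//	ch <- 42             // sender finds the parked sudog; send() copies 42
//	                     // straight onto the receiver's stack via sendDirect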

// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	// No need for cgo write barrier checks because dst is always
	// Go memory.
	memmove(dst, src, t.size)
}

func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

func closechan(c *hchan) {
	if c == nil {
		///MYCODE
		ReportNonBlockingBug()

		panic(plainError("close of nil channel"))
	}
	if c.chInfo != nil {
		Monitor(c.chInfo)
	}

	lock(&c.lock)

	///MYCODE
	if BoolRecord {
		RecordChOp(c)
	}
	if c.chInfo != nil {
		if GlobalEnableOracle && c.chInfo.OKToCheck && okToCheck(c) {
			currentGo := CurrentGoInfo()
			AddRefGoroutine(c.chInfo, currentGo)
		}
	}

	if c.closed != 0 {
		unlock(&c.lock)
		///MYCODE
		ReportNonBlockingBug()

		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
		racerelease(c.raceaddr())
	}

	c.closed = 1

	var glist gList

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = unsafe.Pointer(sg)
		sg.success = false
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = unsafe.Pointer(sg)
		sg.success = false
		if raceenabled {
			raceacquireg(gp, c.raceaddr())
		}
		glist.push(gp)
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for !glist.empty() {
		gp := glist.pop()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
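
// The two release loops above produce the user-visible close semantics
// (a sketch; ch is illustrative): blocked receivers wake with the zero
// value and ok == false, while blocked senders wake and panic.
//
//	ch := make(chan int)
//	go func() { v, ok := <-ch; _ = v; _ = ok }() // wakes with 0, false
//	close(ch)
//	// a goroutine blocked in "ch <- 1" would instead panic with
//	// "send on closed channel"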

// empty reports whether a read from c would block (that is, the channel is
// empty).  It uses a single atomic read of mutable state.
func empty(c *hchan) bool {
	// c.dataqsiz is immutable.
	if c.dataqsiz == 0 {
		return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil
	}
	return atomic.Loaduint(&c.qcount) == 0
}

// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
	chanrecv(c, elem, true)
}

//go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(c, elem, true)
	return
}

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, waitReasonChanReceiveNilChan, traceEvGoStop, 2)
		throw("unreachable")
	}

	///MYCODE
	if okToCheck(c) {
		blockEntry := EnqueueBlockEntry([]PrimInfo{c.chInfo}, Recv)
		defer DequeueBlockEntry(blockEntry)
	}
	if c.chInfo != nil {
		Monitor(c.chInfo)
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	if !block && empty(c) {
		// After observing that the channel is not ready for receiving, we observe whether the
		// channel is closed.
		//
		// Reordering of these checks could lead to incorrect behavior when racing with a close.
		// For example, if the channel was open and not empty, was closed, and then drained,
		// reordered reads could incorrectly indicate "open and empty". To prevent reordering,
		// we use atomic loads for both checks, and rely on emptying and closing to happen in
		// separate critical sections under the same lock.  This assumption fails when closing
		// an unbuffered channel with a blocked send, but that is an error condition anyway.
		if atomic.Load(&c.closed) == 0 {
			// Because a channel cannot be reopened, the later observation of the channel
			// being not closed implies that it was also not closed at the moment of the
			// first observation. We behave as if we observed the channel at that moment
			// and report that the receive cannot proceed.
			return
		}
		// The channel is irreversibly closed. Re-check whether the channel has any pending data
		// to receive, which could have arrived between the empty and closed checks above.
		// Sequential consistency is also required here, when racing with such a send.
		if empty(c) {
			// The channel is irreversibly closed and empty.
			if raceenabled {
				raceacquire(c.raceaddr())
			}
			if ep != nil {
				typedmemclr(c.elemtype, ep)
			}
			return true, false
		}
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	///MYCODE
	if c.chInfo != nil && GlobalEnableOracle && c.chInfo.OKToCheck && okToCheck(c) {
		currentGo := CurrentGoInfo()
		AddRefGoroutine(c.chInfo, currentGo)
		currentGo.SetBlockAt(c.chInfo, Recv)
		CS := []PrimInfo{c.chInfo}
		var checkEntry *CheckEntry
		if BoolDelayCheck {
			checkEntry = EnqueueCheckEntry(CS)
		} else {
			CheckBlockBug(CS)
		}
		defer currentGo.WithdrawBlock(checkEntry)
	}

	///MYCODE
	if BoolRecord {
		RecordChOp(c)
	}

	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(c.raceaddr())
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			racenotify(c, c.recvx, nil)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.isSelect = false
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	atomic.Store8(&gp.parkingOnChan, 1)
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	gp.activeStackChans = false
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	success := mysg.success
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, success
}
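
// The (selected, received) results map onto the user-level receive forms
// (a sketch; ch, v, and ok are illustrative):
//
//	v = <-ch     // chanrecv(ch, &v, true): selected is always true
//	v, ok = <-ch // via chanrecv2: ok == received, which turns false once
//	             // ch is closed and drained
//
//	select {
//	case v = <-ch: // chanrecv(ch, &v, false): taken when selected == true
//	default:       // taken when (false, false) comes back
//	}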

// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			racenotify(c, c.recvx, nil)
			racenotify(c, c.recvx, sg)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	sg.success = true
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, skip+1)
}
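
// A worked example of the full-buffer case above (a sketch; the values are
// illustrative). With make(chan int, 2) holding [10, 20] (recvx == sendx == 0)
// and a sender blocked trying to send 30:
//
//	v := <-ch // receiver takes 10 from the head slot (recvx),
//	          // the blocked sender's 30 is written into that same slot,
//	          // recvx and sendx both advance to 1: slots are now [30, 20],
//	          // and FIFO order holds: next receives yield 20, then 30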

func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on the
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	atomic.Store8(&gp.parkingOnChan, 0)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock chanLock
	// we risk gp getting readied by a channel operation and
	// so gp could continue running before everything before
	// the unlock is visible (even to gp itself).
	unlock((*mutex)(chanLock))
	return true
}

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(c, elem, false, getcallerpc())
}

// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(c, elem, false)
	return
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(c, elem, false)
	return
}

//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(c, elem, !nb, getcallerpc())
}

//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(c, elem, !nb)
}

//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflectlite_chanlen internal/reflectlite.chanlen
func reflectlite_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}
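
// The go:linkname directives above are how package reflect reaches these
// unexported runtime entry points. For example (a sketch; ch is illustrative),
// the following reflect calls bottom out in reflect_chanlen, reflect_chansend,
// and reflect_chanclose respectively:
//
//	ch := make(chan int, 4)
//	rv := reflect.ValueOf(ch)
//	rv.Len()                    // reflect.chanlen -> reflect_chanlen
//	rv.Send(reflect.ValueOf(1)) // reflect.chansend -> reflect_chansend
//	rv.Close()                  // reflect.chanclose -> reflect_chanclose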

func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if a goroutine was put on this queue because of a
		// select, there is a small window between the goroutine
		// being woken up by a different case and it grabbing the
		// channel locks. Once it has the lock
		// it removes itself from the queue, so we won't see it after that.
		// We use a flag in the G struct to tell us when someone
		// else has won the race to signal this goroutine but the goroutine
		// hasn't removed itself from the queue yet.
		if sgp.isSelect && !atomic.Cas(&sgp.g.selectDone, 0, 1) {
			continue
		}

		return sgp
	}
}
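
// Because enqueue appends at the tail and dequeue pops from the head, waiters
// are released in FIFO order (a sketch; it assumes the first goroutine parks
// on ch.sendq before the second, which the scheduler does not guarantee):
//
//	ch := make(chan int)
//	go func() { ch <- 1 }() // parks first, enqueued first
//	go func() { ch <- 2 }() // parks second, enqueued second
//	<-ch                    // dequeues the first sudog: receives 1
//	<-ch                    // then the second: receives 2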

func (c *hchan) raceaddr() unsafe.Pointer {
	// Treat read-like and write-like operations on the channel as
	// happening at this address. Avoid using the address of qcount
	// or dataqsiz, because the len() and cap() builtins read
	// those addresses, and we don't want them racing with
	// operations like close().
	return unsafe.Pointer(&c.buf)
}

func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}

// Notify the race detector of a send or receive involving buffer entry idx
// and a channel c or its communicating partner sg.
// This function handles the special case of c.elemsize==0.
func racenotify(c *hchan, idx uint, sg *sudog) {
	// We could have passed the unsafe.Pointer corresponding to entry idx
	// instead of idx itself.  However, in a future version of this function,
	// we can use idx to better handle the case of elemsize==0.
	// A future improvement to the detector is to call TSan with c and idx:
	// this way, Go will continue not allocating buffer entries for channels
	// of elemsize==0, yet the race detector can be made to handle multiple
	// sync objects underneath the hood (one sync object per idx).
	qp := chanbuf(c, idx)
	// When elemsize==0, we don't allocate a full buffer for the channel.
	// Instead of individual buffer entries, the race detector uses the
	// c.buf as the only buffer entry.  This simplification prevents us from
	// following the memory model's happens-before rules (rules that are
	// implemented in racereleaseacquire).  Instead, we accumulate happens-before
	// information in the synchronization object associated with c.buf.
	if c.elemsize == 0 {
		if sg == nil {
			raceacquire(qp)
			racerelease(qp)
		} else {
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
	} else {
		if sg == nil {
			racereleaseacquire(qp)
		} else {
			racereleaseacquireg(sg.g, qp)
		}
	}
}