github.com/sean-/go@v0.0.0-20151219100004-97f854cd7bb6/src/runtime/chan.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty.
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.
import (
	"runtime/internal/atomic"
	"unsafe"
)

const (
	maxAlign  = 8
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)

type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16
	closed   uint32
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters
	lock     mutex
}

type waitq struct {
	first *sudog
	last  *sudog
}
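
// Illustrative sketch (not part of the original source): buf is used as a
// circular queue indexed by sendx and recvx, with qcount tracking how many
// elements are currently stored. For a hypothetical user-level channel, the
// fields above evolve roughly like this:
//
//	ch := make(chan int, 3) // dataqsiz = 3, qcount = 0, sendx = 0, recvx = 0
//	ch <- 1                 // qcount = 1, sendx = 1
//	ch <- 2                 // qcount = 2, sendx = 2
//	v := <-ch               // v == 1, qcount = 1, recvx = 1
//
// sendx and recvx wrap back to 0 when they reach dataqsiz, which is what
// makes the buffer circular.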

//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int64) *hchan {
	return makechan(t, size)
}

func makechan(t *chantype, size int64) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}
	if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/uintptr(elem.size)) {
		panic("makechan: size out of range")
	}

	var c *hchan
	if elem.kind&kindNoPointers != 0 || size == 0 {
		// Allocate memory in one call.
		// Hchan does not contain pointers interesting for GC in this case:
		// buf points into the same allocation, elemtype is persistent.
		// SudoG's are referenced from their owning thread so they can't be collected.
		// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
		c = (*hchan)(mallocgc(hchanSize+uintptr(size)*uintptr(elem.size), nil, flagNoScan))
		if size > 0 && elem.size != 0 {
			c.buf = add(unsafe.Pointer(c), hchanSize)
		} else {
			// race detector uses this location for synchronization
			// Also prevents us from pointing beyond the allocation (see issue 9401).
			c.buf = unsafe.Pointer(c)
		}
	} else {
		c = new(hchan)
		c.buf = newarray(elem, uintptr(size))
	}
	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
	}
	return c
}
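
// Illustrative sketch (not part of the original source) of how the allocation
// cases above are reached from ordinary Go code; the compiler lowers each make
// call into a makechan call with the corresponding *chantype and size:
//
//	c1 := make(chan int, 8)  // pointer-free element: one mallocgc call,
//	                         // c1.buf points just past the hchan header
//	c2 := make(chan *int, 8) // element contains pointers: hchan via new,
//	                         // buffer via newarray so the GC can scan it
//	c3 := make(chan int)     // unbuffered: no separate buffer, buf points
//	                         // at the hchan itself (see issue 9401)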

// chanbuf(c, i) is pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
	chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
}

/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. It is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if raceenabled {
		raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
	}
	if msanenabled {
		msanread(ep, t.elem.size)
	}

	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation.
	if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
		(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic("send on closed channel")
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) })
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer.  Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel.  Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.selectdone = nil
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if gp.param == nil {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic("send on closed channel")
	}
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(int64(mysg.releasetime)-t0, 2)
	}
	releaseSudog(mysg)
	return true
}
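
// A user-level sketch (not part of the original source) of the chansend paths
// above, using a hypothetical trySend helper. A plain send blocks; a select
// with a default clause is the non-blocking form, which reaches chansend with
// block=false and can take the lock-free fast path:
//
//	func trySend(ch chan int, v int) bool {
//		select {
//		case ch <- v: // lowered to selectnbsend -> chansend(..., false, ...)
//			return true
//		default: // full buffer or no waiting receiver: fail fast
//			return false
//		}
//	}
//
// In either form, a send on a closed channel panics with "send on closed
// channel", and a send on a nil channel blocks forever (or returns false
// immediately when non-blocking).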

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked.  send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly.  Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	unlockf()
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 4)
}

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// Send on an unbuffered or empty-buffered channel is the only operation
	// in the entire runtime where one goroutine
	// writes to the stack of another goroutine. The GC assumes that
	// stack writes only happen when the goroutine is running and are
	// only done by that goroutine. Using a write barrier is sufficient to
	// make up for violating that assumption, but the write barrier has to work.
	// typedmemmove will call heapBitsBulkBarrier, but the target bytes
	// are not in the heap, so that will not help. We arrange to call
	// memmove and typeBitsBulkBarrier instead.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	memmove(dst, src, t.size)
	typeBitsBulkBarrier(t, uintptr(dst), t.size)
}

func closechan(c *hchan) {
	if c == nil {
		panic("close of nil channel")
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic("close of closed channel")
	}

	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&c))
		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
		racerelease(unsafe.Pointer(c))
	}

	c.closed = 1

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			memclr(sg.elem, uintptr(c.elemsize))
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		goready(gp, 3)
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		goready(gp, 3)
	}
	unlock(&c.lock)
}
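
// Illustrative sketch (not part of the original source) of what closechan does
// to parked goroutines:
//
//	ch := make(chan int)
//	go func() { v, ok := <-ch; _, _ = v, ok }() // parks on ch.recvq
//	close(ch)                                   // wakes it with v == 0, ok == false
//
// Receivers woken here get a zeroed destination (memclr on sg.elem above) and
// report received == false. A goroutine parked in chansend is also woken; it
// finds gp.param == nil with c.closed != 0 and panics with "send on closed
// channel". Closing a nil channel or an already-closed channel panics in
// closechan itself.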

// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
	chanrecv(t, c, elem, true)
}

//go:nosplit
func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(t, c, elem, true)
	return
}

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not ready for receiving, we observe that the
	// channel is not closed. Each of these observations is a single word-sized read
	// (first c.sendq.first or c.qcount, and second c.closed).
	// Because a channel cannot be reopened, the later observation of the channel
	// being not closed implies that it was also not closed at the moment of the
	// first observation. We behave as if we observed the channel at that moment
	// and report that the receive cannot proceed.
	//
	// The order of operations is important here: reversing the operations can lead to
	// incorrect behavior when racing with a close.
	if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
		c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
		atomic.Load(&c.closed) == 0 {
		return
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(unsafe.Pointer(c))
		}
		unlock(&c.lock)
		if ep != nil {
			memclr(ep, uintptr(c.elemsize))
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender.  If buffer is size 0, receive value
		// directly from sender.  Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) })
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		memclr(qp, uintptr(c.elemsize))
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.selectdone = nil
	gp.param = nil
	c.recvq.enqueue(mysg)
	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	closed := gp.param == nil
	gp.param = nil
	releaseSudog(mysg)
	return true, !closed
}
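
// A sketch (not part of the original source) of how the two results of
// chanrecv surface in user code:
//
//	v, ok := <-ch // lowered to chanrecv2: ok is the received result, and
//	              // becomes false once ch is closed and drained
//	v = <-ch      // lowered to chanrecv1: the received result is discarded
//
// With block=false (the select/default path), (false, false) means the
// receive would block, (true, false) means the channel is closed and empty
// (and *ep has been zeroed), and (true, true) means an element was delivered.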

// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		unlockf()
		if ep != nil {
			// copy data from sender
			// ep points to our own stack or heap, so nothing
			// special (ala sendDirect) needed here.
			typedmemmove(c.elemtype, ep, sg.elem)
		}
	} else {
		// Queue is full.  Take the item at the
		// head of the queue.  Make the sender enqueue
		// its item at the tail of the queue.  Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		unlockf()
	}
	sg.elem = nil
	gp := sg.g
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 4)
}
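
// Illustrative sketch (not part of the original source) of the buffered branch
// of recv: with a full buffer and a parked sender, a receive hands the oldest
// buffered value to the receiver and slides the parked sender's value into the
// slot just freed, so FIFO order is preserved:
//
//	ch := make(chan int, 1)
//	ch <- 1                 // buffer full
//	go func() { ch <- 2 }() // parks on ch.sendq once it finds the buffer full
//	// ...once the sender has parked:
//	a := <-ch // a == 1; 2 is copied into the buffer and the sender is woken
//	b := <-ch // b == 2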

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
}

// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(t, c, elem, false)
	return
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(t, c, elem, false)
	return
}

//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
}

//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(t, c, elem, !nb)
}

//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}
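
// A sketch (not part of the original source): these helpers back
// reflect.Value.Len and reflect.Value.Cap for channels and mirror the len and
// cap built-ins, including the nil-channel case:
//
//	var ch chan int
//	_ = len(ch)            // 0: len and cap of a nil channel are zero
//	ch = make(chan int, 4)
//	ch <- 1
//	_ = len(ch)            // 1: c.qcount
//	_ = cap(ch)            // 4: c.dataqsiz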

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}

func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if sgp participates in a select and is already signaled, ignore it
		if sgp.selectdone != nil {
			// claim the right to signal
			if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
				continue
			}
		}

		return sgp
	}
}
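
// A sketch (not part of the original source) of why dequeue checks selectdone:
// a goroutine blocked in a multi-channel select enqueues one sudog on every
// channel it waits on, and those sudogs share a single selectdone word. The
// CAS above lets exactly one channel claim the wakeup, so a value is delivered
// at most once even when several cases could fire:
//
//	ch1, ch2 := make(chan int), make(chan int)
//	go func() {
//		select {
//		case <-ch1: // one sudog on ch1.recvq
//		case <-ch2: // another sudog, sharing selectdone, on ch2.recvq
//		}
//	}()
//	ch1 <- 1 // the send dequeues ch1's sudog and wins the CAS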

func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}