github.com/eun/go@v0.0.0-20170811110501-92cfd07a6cfd/src/runtime/select.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go select statements.
     8  
     9  import (
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
// debugSelect enables verbose print tracing of select operations.
const debugSelect = false

// Kinds of select cases (scase.kind). A case registered with a nil
// channel is left zeroed, so its kind is caseNil and selectgo can
// never choose it.
const (
	// scase.kind
	caseNil = iota
	caseRecv
	caseSend
	caseDefault
)
    23  
// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
//
// The header is followed in memory by tcase-1 additional scase slots
// (extending the inline scase array), then the lockorder array, then
// the pollorder array; pollorder and lockorder point into those
// trailing arrays (see selectsize and newselect).
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder *uint16  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}
    34  
// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc (for race detector / msan)
	kind        uint16         // caseNil, caseRecv, caseSend, or caseDefault
	receivedp   *bool // pointer to received bool, if any
	releasetime int64 // release timestamp for blocking profile (see blockevent in selectgo)
}
    46  
// PCs of the plain channel operations, used to attribute race-detector
// events on select cases to chansend/chanrecv (see the racereadpc and
// race*ObjectPC calls in selectgo).
var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)
    51  
    52  func selectsize(size uintptr) uintptr {
    53  	selsize := unsafe.Sizeof(hselect{}) +
    54  		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
    55  		size*unsafe.Sizeof(*hselect{}.lockorder) +
    56  		size*unsafe.Sizeof(*hselect{}.pollorder)
    57  	return round(selsize, sys.Int64Align)
    58  }
    59  
// newselect initializes a select header in memory of selsize bytes
// (as computed by selectsize) for size cases. The layout is: the
// hselect header with its inline scase array extended to size slots,
// immediately followed by the lockorder array, then the pollorder
// array.
func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	// lockorder starts right after the size scase slots.
	sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	// pollorder starts right after the size lockorder entries.
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}
    74  
    75  func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) {
    76  	pc := getcallerpc(unsafe.Pointer(&sel))
    77  	i := sel.ncase
    78  	if i >= sel.tcase {
    79  		throw("selectsend: too many cases")
    80  	}
    81  	sel.ncase = i + 1
    82  	if c == nil {
    83  		return
    84  	}
    85  	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
    86  	cas.pc = pc
    87  	cas.c = c
    88  	cas.kind = caseSend
    89  	cas.elem = elem
    90  
    91  	if debugSelect {
    92  		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, "\n")
    93  	}
    94  }
    95  
    96  func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) {
    97  	pc := getcallerpc(unsafe.Pointer(&sel))
    98  	i := sel.ncase
    99  	if i >= sel.tcase {
   100  		throw("selectrecv: too many cases")
   101  	}
   102  	sel.ncase = i + 1
   103  	if c == nil {
   104  		return
   105  	}
   106  	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
   107  	cas.pc = pc
   108  	cas.c = c
   109  	cas.kind = caseRecv
   110  	cas.elem = elem
   111  	cas.receivedp = received
   112  
   113  	if debugSelect {
   114  		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, "\n")
   115  	}
   116  }
   117  
   118  func selectdefault(sel *hselect) {
   119  	pc := getcallerpc(unsafe.Pointer(&sel))
   120  	i := sel.ncase
   121  	if i >= sel.tcase {
   122  		throw("selectdefault: too many cases")
   123  	}
   124  	sel.ncase = i + 1
   125  	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
   126  	cas.pc = pc
   127  	cas.c = nil
   128  	cas.kind = caseDefault
   129  
   130  	if debugSelect {
   131  		print("selectdefault s=", sel, " pc=", hex(cas.pc), "\n")
   132  	}
   133  }
   134  
   135  func sellock(scases []scase, lockorder []uint16) {
   136  	var c *hchan
   137  	for _, o := range lockorder {
   138  		c0 := scases[o].c
   139  		if c0 != nil && c0 != c {
   140  			c = c0
   141  			lock(&c.lock)
   142  		}
   143  	}
   144  }
   145  
   146  func selunlock(scases []scase, lockorder []uint16) {
   147  	// We must be very careful here to not touch sel after we have unlocked
   148  	// the last lock, because sel can be freed right after the last unlock.
   149  	// Consider the following situation.
   150  	// First M calls runtime·park() in runtime·selectgo() passing the sel.
   151  	// Once runtime·park() has unlocked the last lock, another M makes
   152  	// the G that calls select runnable again and schedules it for execution.
   153  	// When the G runs on another M, it locks all the locks and frees sel.
   154  	// Now if the first M touches sel, it will access freed memory.
   155  	for i := len(scases) - 1; i >= 0; i-- {
   156  		c := scases[lockorder[i]].c
   157  		if c == nil {
   158  			break
   159  		}
   160  		if i > 0 && c == scases[lockorder[i-1]].c {
   161  			continue // will unlock it on the next iteration
   162  		}
   163  		unlock(&c.lock)
   164  	}
   165  }
   166  
// selparkcommit is the gopark unlock function for select: it releases
// every channel lock held by the parking goroutine by walking
// gp.waiting, which selectgo built in channel lock order. It returns
// true so the park proceeds.
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}
   190  
// block parks the current goroutine with no way to be woken.
// The compiler emits a call to it for an empty select ("select {}").
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}
   194  
// selectgo implements the select statement.
//
// *sel is on the current goroutine's stack (regardless of any
// escaping in selectgo).
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
//
// It works in three passes: pass 1 polls all cases (in random
// pollorder) for one that is immediately ready; if none is and there
// is no default, pass 2 enqueues a sudog on every channel and parks;
// after wakeup, pass 3 dequeues the unsuccessful sudogs and reports
// the case that fired.
func selectgo(sel *hselect) int {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}
	if sel.ncase != sel.tcase {
		throw("selectgo: case count mismatch")
	}

	// View the inline scase array as an ordinary slice for indexing.
	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		// -1 marks "not yet released"; a real release time is filled
		// in later for the blocking profile.
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	// (inside-out Fisher-Yates: after iteration i, pollorder[0..i]
	// holds a uniformly random permutation of 0..i)
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 1; i < int(sel.ncase); i++ {
		j := fastrandn(uint32(i + 1))
		pollorder[i] = pollorder[j]
		pollorder[j] = uint16(i)
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
	// Phase 1: build a max-heap (keyed by channel address) by sift-up
	// insertion of each case.
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
	// Phase 2: repeatedly move the max to the end and sift down,
	// leaving lockorder sorted ascending by channel address.
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

	// Re-entered (goto loop) after a wakeup with no completed case,
	// which happens when a channel involved was closed.
loop:
	// pass 1 - look for something already waiting
	var dfli int
	var dfl *scase
	var casi int
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		casi = int(pollorder[i])
		cas = &scases[casi]
		c = cas.c

		switch cas.kind {
		case caseNil:
			continue

		case caseRecv:
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}

		case caseDefault:
			// Remember but don't take the default until every other
			// case has been polled.
			dfli = casi
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(scases, lockorder)
		casi = dfli
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		if cas.kind == caseNil {
			continue
		}
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 1)

	// While we were asleep, some goroutine came along and completed
	// one of the cases in the select and woke us up (called ready).
	// As part of that process, the goroutine did a cas on done above
	// (aka *sg.selectdone for all queued sg) to win the right to
	// complete the select. Now done = 1.
	//
	// If we copy (grow) our own stack, we will update the
	// selectdone pointers inside the gp.waiting sudog list to point
	// at the new stack. Another goroutine attempting to
	// complete one of our (still linked in) select cases might
	// see the new selectdone pointer (pointing at the new stack)
	// before the new stack has real data; if the new stack has done = 0
	// (before the old values are copied over), the goroutine might
	// do a cas via sg.selectdone and incorrectly believe that it has
	// won the right to complete the select, executing a second
	// communication and attempting to wake us (call ready) again.
	//
	// Then things break.
	//
	// The best break is that the goroutine doing ready sees the
	// _Gcopystack status and throws, as in #17007.
	// A worse break would be for us to continue on, start running real code,
	// block in a semaphore acquisition (sema.go), and have the other
	// goroutine wake us up without having really acquired the semaphore.
	// That would result in the goroutine spuriously running and then
	// queue up another spurious wakeup when the semaphore really is ready.
	// In general the situation can cascade until something notices the
	// problem and causes a crash.
	//
	// A stack shrink does not have this problem, because it locks
	// all the channels that are involved first, blocking out the
	// possibility of a cas on selectdone.
	//
	// A stack growth before gopark above does not have this
	// problem, because we hold those channel locks (released by
	// selparkcommit).
	//
	// A stack growth after sellock below does not have this
	// problem, because again we hold those channel locks.
	//
	// The only problem is a stack growth during sellock.
	// To keep that from happening, run sellock on the system stack.
	//
	// It might be that we could avoid this if copystack copied the
	// stack before calling adjustsudogs. In that case,
	// syncadjustsudogs would need to recopy the tiny part that
	// it copies today, resulting in a little bit of extra copying.
	//
	// An even better fix, not for the week before a release candidate,
	// would be to put space in every sudog and make selectdone
	// point at (say) the space in the first sudog.

	systemstack(func() {
		sellock(scases, lockorder)
	})

	// The waker stashed its sudog (if any) in gp.param.
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if k.kind == caseNil {
			continue
		}
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// We can wake up with gp.param == nil (so cas == nil)
		// when a channel involved in the select has been closed.
		// It is easiest to loop and re-run the operation;
		// we'll see that it's now closed.
		// Maybe some day we can signal the close explicitly,
		// but we'd have to distinguish close-on-reader from close-on-writer.
		// It's easiest not to duplicate the code and just recheck above.
		// We know that something closed, and things never un-close,
		// so we won't block again.
		goto loop
	}

	c = cas.c

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}
	if msanenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.size)
		} else if cas.kind == caseSend {
			msanread(cas.elem, c.elemtype.size)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.size)
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	goto retc

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 1)
	}
	return casi

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}
   637  
   638  func (c *hchan) sortkey() uintptr {
   639  	// TODO(khr): if we have a moving garbage collector, we'll need to
   640  	// change this function.
   641  	return uintptr(unsafe.Pointer(c))
   642  }
   643  
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir      // direction of the case (selectSend/selectRecv/selectDefault)
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}
   652  
// These values must match ../reflect/value.go:/SelectDir.
// A selectDir describes the kind of a runtimeSelect case.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)
   662  
   663  //go:linkname reflect_rselect reflect.rselect
   664  func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
   665  	// flagNoScan is safe here, because all objects are also referenced from cases.
   666  	size := selectsize(uintptr(len(cases)))
   667  	sel := (*hselect)(mallocgc(size, nil, true))
   668  	newselect(sel, int64(size), int32(len(cases)))
   669  	r := new(bool)
   670  	for i := range cases {
   671  		rc := &cases[i]
   672  		switch rc.dir {
   673  		case selectDefault:
   674  			selectdefault(sel)
   675  		case selectSend:
   676  			selectsend(sel, rc.ch, rc.val)
   677  		case selectRecv:
   678  			selectrecv(sel, rc.ch, rc.val, r)
   679  		}
   680  	}
   681  
   682  	chosen = selectgo(sel)
   683  	recvOK = *r
   684  	return
   685  }
   686  
   687  func (q *waitq) dequeueSudoG(sgp *sudog) {
   688  	x := sgp.prev
   689  	y := sgp.next
   690  	if x != nil {
   691  		if y != nil {
   692  			// middle of queue
   693  			x.next = y
   694  			y.prev = x
   695  			sgp.next = nil
   696  			sgp.prev = nil
   697  			return
   698  		}
   699  		// end of queue
   700  		x.next = nil
   701  		q.last = x
   702  		sgp.prev = nil
   703  		return
   704  	}
   705  	if y != nil {
   706  		// start of queue
   707  		y.prev = nil
   708  		q.first = y
   709  		sgp.next = nil
   710  		return
   711  	}
   712  
   713  	// x==y==nil. Either sgp is the only element in the queue,
   714  	// or it has already been removed. Use q.first to disambiguate.
   715  	if q.first == sgp {
   716  		q.first = nil
   717  		q.last = nil
   718  	}
   719  }