github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/select.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go select statements.
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"unsafe"
    12  )
    13  
// debugSelect enables verbose print tracing of select execution.
const debugSelect = false
    15  
// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's scasetype.
type scase struct {
	c    *hchan         // chan
	elem unsafe.Pointer // data element: send source or receive destination, depending on case direction
}
    23  
var (
	// PCs of chansend/chanrecv, used to attribute race-detector
	// annotations (racereadpc, raceReadObjectPC, ...) to the
	// corresponding channel operation.
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)
    28  
// selectsetpc stores the caller's PC into *pc. It is used by
// reflect_rselect to capture a per-case PC for race-detector reporting.
// Note: the result depends on call depth, so this must be called
// directly from the frame whose PC should be recorded.
func selectsetpc(pc *uintptr) {
	*pc = getcallerpc()
}
    32  
    33  func sellock(scases []scase, lockorder []uint16) {
    34  	var c *hchan
    35  	for _, o := range lockorder {
    36  		c0 := scases[o].c
    37  		if c0 != c {
    38  			c = c0
    39  			lock(&c.lock)
    40  		}
    41  	}
    42  }
    43  
    44  func selunlock(scases []scase, lockorder []uint16) {
    45  	// We must be very careful here to not touch sel after we have unlocked
    46  	// the last lock, because sel can be freed right after the last unlock.
    47  	// Consider the following situation.
    48  	// First M calls runtime·park() in runtime·selectgo() passing the sel.
    49  	// Once runtime·park() has unlocked the last lock, another M makes
    50  	// the G that calls select runnable again and schedules it for execution.
    51  	// When the G runs on another M, it locks all the locks and frees sel.
    52  	// Now if the first M touches sel, it will access freed memory.
    53  	for i := len(lockorder) - 1; i >= 0; i-- {
    54  		c := scases[lockorder[i]].c
    55  		if i > 0 && c == scases[lockorder[i-1]].c {
    56  			continue // will unlock it on the next iteration
    57  		}
    58  		unlock(&c.lock)
    59  	}
    60  }
    61  
// selparkcommit is the gopark commit function for select: it publishes
// gp.activeStackChans, clears gp.parkingOnChan, and then releases every
// channel lock held by the select (each distinct channel once, walking
// gp.waiting in lock order).
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on a
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	atomic.Store8(&gp.parkingOnChan, 0)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock any of the
	// channel locks we risk gp getting readied by a channel operation
	// and so gp could continue running before everything before the
	// unlock is visible (even to gp itself).

	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	// Always allow the park to proceed.
	return true
}
   101  
// block parks the current goroutine forever. Used for a select with no
// cases (see reflect_rselect).
func block() {
	gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever
}
   105  
   106  // selectgo implements the select statement.
   107  //
   108  // cas0 points to an array of type [ncases]scase, and order0 points to
   109  // an array of type [2*ncases]uint16 where ncases must be <= 65536.
   110  // Both reside on the goroutine's stack (regardless of any escaping in
   111  // selectgo).
   112  //
   113  // For race detector builds, pc0 points to an array of type
   114  // [ncases]uintptr (also on the stack); for other builds, it's set to
   115  // nil.
   116  //
   117  // selectgo returns the index of the chosen scase, which matches the
   118  // ordinal position of its respective select{recv,send,default} call.
   119  // Also, if the chosen scase was a receive operation, it reports whether
   120  // a value was received.
   121  func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) {
   122  	if debugSelect {
   123  		print("select: cas0=", cas0, "\n")
   124  	}
   125  
   126  	///MYCODE
   127  	if BoolSelectCount {
   128  		SelectCount()
   129  	}
   130  
   131  	//Note: the return value casei doesn't represent the order of cases in original select. Need experiments
   132  
   133  	// NOTE: In order to maintain a lean stack size, the number of scases
   134  	// is capped at 65536.
   135  	cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
   136  	order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))
   137  
   138  	ncases := nsends + nrecvs
   139  	scases := cas1[:ncases:ncases]
   140  	pollorder := order1[:ncases:ncases]
   141  	lockorder := order1[ncases:][:ncases:ncases]
   142  	// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.
   143  
   144  	// Even when raceenabled is true, there might be select
   145  	// statements in packages compiled without -race (e.g.,
   146  	// ensureSigM in runtime/signal_unix.go).
   147  	var pcs []uintptr
   148  	if raceenabled && pc0 != nil {
   149  		pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0))
   150  		pcs = pc1[:ncases:ncases]
   151  	}
   152  	casePC := func(casi int) uintptr {
   153  		if pcs == nil {
   154  			return 0
   155  		}
   156  		return pcs[casi]
   157  	}
   158  
   159  	var t0 int64
   160  	if blockprofilerate > 0 {
   161  		t0 = cputicks()
   162  	}
   163  
   164  	// The compiler rewrites selects that statically have
   165  	// only 0 or 1 cases plus default into simpler constructs.
   166  	// The only way we can end up with such small sel.ncase
   167  	// values here is for a larger select in which most channels
   168  	// have been nilled out. The general code handles those
   169  	// cases correctly, and they are rare enough not to bother
   170  	// optimizing (and needing to test).
   171  
   172  	// generate permuted order
   173  	norder := 0
   174  	for i := range scases {
   175  		cas := &scases[i]
   176  
   177  		// Omit cases without channels from the poll and lock orders.
   178  		if cas.c == nil {
   179  			cas.elem = nil // allow GC
   180  			continue
   181  		}
   182  
   183  		j := fastrandn(uint32(norder + 1))
   184  		pollorder[norder] = pollorder[j]
   185  		pollorder[j] = uint16(i)
   186  		norder++
   187  	}
   188  	pollorder = pollorder[:norder]
   189  	lockorder = lockorder[:norder]
   190  
   191  	// sort the cases by Hchan address to get the locking order.
   192  	// simple heap sort, to guarantee n log n time and constant stack footprint.
   193  	for i := range lockorder {
   194  		j := i
   195  		// Start with the pollorder to permute cases on the same channel.
   196  		c := scases[pollorder[i]].c
   197  		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
   198  			k := (j - 1) / 2
   199  			lockorder[j] = lockorder[k]
   200  			j = k
   201  		}
   202  		lockorder[j] = pollorder[i]
   203  	}
   204  	for i := len(lockorder) - 1; i >= 0; i-- {
   205  		o := lockorder[i]
   206  		c := scases[o].c
   207  		lockorder[i] = lockorder[0]
   208  		j := 0
   209  		for {
   210  			k := j*2 + 1
   211  			if k >= i {
   212  				break
   213  			}
   214  			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
   215  				k++
   216  			}
   217  			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
   218  				lockorder[j] = lockorder[k]
   219  				j = k
   220  				continue
   221  			}
   222  			break
   223  		}
   224  		lockorder[j] = o
   225  	}
   226  
   227  	if debugSelect {
   228  		for i := 0; i+1 < len(lockorder); i++ {
   229  			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
   230  				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
   231  				throw("select: broken sort")
   232  			}
   233  		}
   234  	}
   235  
   236  	// lock all the channels involved in the select
   237  	sellock(scases, lockorder)
   238  
   239  	var (
   240  		gp     *g
   241  		sg     *sudog
   242  		c      *hchan
   243  		k      *scase
   244  		sglist *sudog
   245  		sgnext *sudog
   246  		qp     unsafe.Pointer
   247  		nextp  **sudog
   248  	)
   249  
   250  	///MYCODE
   251  	var lastC *hchan
   252  	var currentGo *GoInfo
   253  	boolInvolveChNotOK := false
   254  	CS := []PrimInfo{}
   255  	vecHChan := []*hchan{}
   256  	vecPrimInfo := []PrimInfo{}
   257  
   258  	// pass 1 - look for something already waiting
   259  	var casi int
   260  	var cas *scase
   261  	var caseSuccess bool
   262  	var caseReleaseTime int64 = -1
   263  	var recvOK bool
   264  	for _, casei := range pollorder {
   265  		casi = int(casei)
   266  		cas = &scases[casi]
   267  		c = cas.c
   268  
   269  		if casi >= nsends {
   270  			sg = c.sendq.dequeue()
   271  			if sg != nil {
   272  				goto recv
   273  			}
   274  			if c.qcount > 0 {
   275  				goto bufrecv
   276  			}
   277  			if c.closed != 0 {
   278  				goto rclose
   279  			}
   280  		} else {
   281  			if raceenabled {
   282  				racereadpc(c.raceaddr(), casePC(casi), chansendpc)
   283  			}
   284  			if c.closed != 0 {
   285  				goto sclose
   286  			}
   287  			sg = c.recvq.dequeue()
   288  			if sg != nil {
   289  				goto send
   290  			}
   291  			if c.qcount < c.dataqsiz {
   292  				goto bufsend
   293  			}
   294  		}
   295  	}
   296  
   297  	if !block {
   298  		selunlock(scases, lockorder)
   299  		casi = -1
   300  		goto retc
   301  	}
   302  
   303  	///MYCODE
   304  	for _, o := range lockorder {
   305  		c0 := scases[o].c
   306  		if c0.chInfo != nil {
   307  			Monitor(c0.chInfo)
   308  		}
   309  		if okToCheck(c0) == false {
   310  			boolInvolveChNotOK = true
   311  		}
   312  		vecHChan = append(vecHChan, c0)
   313  	}
   314  	if boolInvolveChNotOK{
   315  		goto outOfOracle
   316  	}
   317  	if LastMySwitchChoice() == -1 { // If LastMySwitchChoice is not -1, then we are blocked at our fabricate select. Don't report bug here
   318  		for _, hc := range vecHChan {
   319  			vecPrimInfo = append(vecPrimInfo, hc.chInfo)
   320  		}
   321  		blockEntry := EnqueueBlockEntry(vecPrimInfo, Select)
   322  		defer DequeueBlockEntry(blockEntry)
   323  	}
   324  
   325  
   326  
   327  	if GlobalEnableOracle && LastMySwitchChoice() == -1 { // If LastMySwitchChoice is not -1, then we are blocked at our fabricate select. Don't report bug here
   328  		currentGo = CurrentGoInfo()
   329  		for _, o := range lockorder {
   330  			c0 := scases[o].c
   331  			if c0 != lastC {
   332  				lastC = c0
   333  				if lastC.chInfo.OKToCheck == false {
   334  					currentGo.WithdrawBlock(nil)
   335  					goto outOfOracle
   336  				}
   337  				AddRefGoroutine(c.chInfo, currentGo)
   338  				currentGo.SetBlockAt(lastC.chInfo, Select)
   339  				CS = append(CS, lastC.chInfo)
   340  			}
   341  		}
   342  		var checkEntry *CheckEntry
   343  		if lastC != nil {
   344  			CS := []PrimInfo{lastC.chInfo}
   345  			if BoolDelayCheck {
   346  				checkEntry = EnqueueCheckEntry(CS)
   347  			} else {
   348  				CheckBlockBug(CS)
   349  			}
   350  		}
   351  		defer currentGo.WithdrawBlock(checkEntry)
   352  	}
   353  	outOfOracle:
   354  
   355  
   356  	// pass 2 - enqueue on all chans
   357  	gp = getg()
   358  	if gp.waiting != nil {
   359  		throw("gp.waiting != nil")
   360  	}
   361  	nextp = &gp.waiting
   362  	for _, casei := range lockorder {
   363  		casi = int(casei)
   364  		cas = &scases[casi]
   365  		c = cas.c
   366  		sg := acquireSudog()
   367  		sg.g = gp
   368  		sg.isSelect = true
   369  		// No stack splits between assigning elem and enqueuing
   370  		// sg on gp.waiting where copystack can find it.
   371  		sg.elem = cas.elem
   372  		sg.releasetime = 0
   373  		if t0 != 0 {
   374  			sg.releasetime = -1
   375  		}
   376  		sg.c = c
   377  		// Construct waiting list in lock order.
   378  		*nextp = sg
   379  		nextp = &sg.waitlink
   380  
   381  		if casi < nsends {
   382  			c.sendq.enqueue(sg)
   383  		} else {
   384  			c.recvq.enqueue(sg)
   385  		}
   386  	}
   387  
   388  	// wait for someone to wake us up
   389  	gp.param = nil
   390  	// Signal to anyone trying to shrink our stack that we're about
   391  	// to park on a channel. The window between when this G's status
   392  	// changes and when we set gp.activeStackChans is not safe for
   393  	// stack shrinking.
   394  	atomic.Store8(&gp.parkingOnChan, 1)
   395  	gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
   396  	gp.activeStackChans = false
   397  
   398  	sellock(scases, lockorder)
   399  
   400  	gp.selectDone = 0
   401  	sg = (*sudog)(gp.param)
   402  	gp.param = nil
   403  
   404  	// pass 3 - dequeue from unsuccessful chans
   405  	// otherwise they stack up on quiet channels
   406  	// record the successful case, if any.
   407  	// We singly-linked up the SudoGs in lock order.
   408  	casi = -1
   409  	cas = nil
   410  	caseSuccess = false
   411  	sglist = gp.waiting
   412  	// Clear all elem before unlinking from gp.waiting.
   413  	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
   414  		sg1.isSelect = false
   415  		sg1.elem = nil
   416  		sg1.c = nil
   417  	}
   418  	gp.waiting = nil
   419  
   420  	for _, casei := range lockorder {
   421  		k = &scases[casei]
   422  		if sg == sglist {
   423  			// sg has already been dequeued by the G that woke us up.
   424  			casi = int(casei)
   425  			cas = k
   426  			caseSuccess = sglist.success
   427  			if sglist.releasetime > 0 {
   428  				caseReleaseTime = sglist.releasetime
   429  			}
   430  		} else {
   431  			c = k.c
   432  			if int(casei) < nsends {
   433  				c.sendq.dequeueSudoG(sglist)
   434  			} else {
   435  				c.recvq.dequeueSudoG(sglist)
   436  			}
   437  		}
   438  		sgnext = sglist.waitlink
   439  		sglist.waitlink = nil
   440  		releaseSudog(sglist)
   441  		sglist = sgnext
   442  	}
   443  
   444  	if cas == nil {
   445  		throw("selectgo: bad wakeup")
   446  	}
   447  
   448  	c = cas.c
   449  
   450  	if debugSelect {
   451  		print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
   452  	}
   453  
   454  	if casi < nsends {
   455  		if !caseSuccess {
   456  			goto sclose
   457  		}
   458  	} else {
   459  		recvOK = caseSuccess
   460  	}
   461  
   462  	if raceenabled {
   463  		if casi < nsends {
   464  			raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
   465  		} else if cas.elem != nil {
   466  			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
   467  		}
   468  	}
   469  	if msanenabled {
   470  		if casi < nsends {
   471  			msanread(cas.elem, c.elemtype.size)
   472  		} else if cas.elem != nil {
   473  			msanwrite(cas.elem, c.elemtype.size)
   474  		}
   475  	}
   476  
   477  	selunlock(scases, lockorder)
   478  	goto retc
   479  
   480  bufrecv:
   481  	// can receive from buffer
   482  	if raceenabled {
   483  		if cas.elem != nil {
   484  			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
   485  		}
   486  		racenotify(c, c.recvx, nil)
   487  	}
   488  	if msanenabled && cas.elem != nil {
   489  		msanwrite(cas.elem, c.elemtype.size)
   490  	}
   491  	recvOK = true
   492  	qp = chanbuf(c, c.recvx)
   493  	if cas.elem != nil {
   494  		typedmemmove(c.elemtype, cas.elem, qp)
   495  	}
   496  	typedmemclr(c.elemtype, qp)
   497  	c.recvx++
   498  	if c.recvx == c.dataqsiz {
   499  		c.recvx = 0
   500  	}
   501  	c.qcount--
   502  	selunlock(scases, lockorder)
   503  	goto retc
   504  
   505  bufsend:
   506  	// can send to buffer
   507  	if raceenabled {
   508  		racenotify(c, c.sendx, nil)
   509  		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
   510  	}
   511  	if msanenabled {
   512  		msanread(cas.elem, c.elemtype.size)
   513  	}
   514  	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
   515  	c.sendx++
   516  	if c.sendx == c.dataqsiz {
   517  		c.sendx = 0
   518  	}
   519  	c.qcount++
   520  	selunlock(scases, lockorder)
   521  	goto retc
   522  
   523  recv:
   524  	// can receive from sleeping sender (sg)
   525  	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
   526  	if debugSelect {
   527  		print("syncrecv: cas0=", cas0, " c=", c, "\n")
   528  	}
   529  	recvOK = true
   530  	goto retc
   531  
   532  rclose:
   533  	// read at end of closed channel
   534  	selunlock(scases, lockorder)
   535  	recvOK = false
   536  	if cas.elem != nil {
   537  		typedmemclr(c.elemtype, cas.elem)
   538  	}
   539  	if raceenabled {
   540  		raceacquire(c.raceaddr())
   541  	}
   542  	goto retc
   543  
   544  send:
   545  	// can send to a sleeping receiver (sg)
   546  	if raceenabled {
   547  		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
   548  	}
   549  	if msanenabled {
   550  		msanread(cas.elem, c.elemtype.size)
   551  	}
   552  	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
   553  	if debugSelect {
   554  		print("syncsend: cas0=", cas0, " c=", c, "\n")
   555  	}
   556  	goto retc
   557  
   558  retc:
   559  	///MYCODE
   560  	if RecordSelectChoice {
   561  		StoreSelectInput(ncases, casi)
   562  	}
   563  
   564  	if caseReleaseTime > 0 {
   565  		blockevent(caseReleaseTime-t0, 1)
   566  	}
   567  	return casi, recvOK
   568  
   569  sclose:
   570  	// send on closed channel
   571  	selunlock(scases, lockorder)
   572  
   573  	///MYCODE
   574  	ReportNonBlockingBug()
   575  
   576  	panic(plainError("send on closed channel"))
   577  }
   578  
// sortkey returns the channel's address as an integer. It is used to
// impose a consistent locking order across the channels of a select
// (see the heap sort in selectgo).
func (c *hchan) sortkey() uintptr {
	return uintptr(unsafe.Pointer(c))
}
   582  
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir      // direction of the case (send, recv, or default)
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}
   591  
// selectDir describes the kind of a runtimeSelect case.
// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)
   601  
   602  //go:linkname reflect_rselect reflect.rselect
   603  func reflect_rselect(cases []runtimeSelect) (int, bool) {
   604  	if len(cases) == 0 {
   605  		block()
   606  	}
   607  	sel := make([]scase, len(cases))
   608  	orig := make([]int, len(cases))
   609  	nsends, nrecvs := 0, 0
   610  	dflt := -1
   611  	for i, rc := range cases {
   612  		var j int
   613  		switch rc.dir {
   614  		case selectDefault:
   615  			dflt = i
   616  			continue
   617  		case selectSend:
   618  			j = nsends
   619  			nsends++
   620  		case selectRecv:
   621  			nrecvs++
   622  			j = len(cases) - nrecvs
   623  		}
   624  
   625  		sel[j] = scase{c: rc.ch, elem: rc.val}
   626  		orig[j] = i
   627  	}
   628  
   629  	// Only a default case.
   630  	if nsends+nrecvs == 0 {
   631  		return dflt, false
   632  	}
   633  
   634  	// Compact sel and orig if necessary.
   635  	if nsends+nrecvs < len(cases) {
   636  		copy(sel[nsends:], sel[len(cases)-nrecvs:])
   637  		copy(orig[nsends:], orig[len(cases)-nrecvs:])
   638  	}
   639  
   640  	order := make([]uint16, 2*(nsends+nrecvs))
   641  	var pc0 *uintptr
   642  	if raceenabled {
   643  		pcs := make([]uintptr, nsends+nrecvs)
   644  		for i := range pcs {
   645  			selectsetpc(&pcs[i])
   646  		}
   647  		pc0 = &pcs[0]
   648  	}
   649  
   650  	chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1)
   651  
   652  	// Translate chosen back to caller's ordering.
   653  	if chosen < 0 {
   654  		chosen = dflt
   655  	} else {
   656  		chosen = orig[chosen]
   657  	}
   658  	return chosen, recvOK
   659  }
   660  
   661  func (q *waitq) dequeueSudoG(sgp *sudog) {
   662  	x := sgp.prev
   663  	y := sgp.next
   664  	if x != nil {
   665  		if y != nil {
   666  			// middle of queue
   667  			x.next = y
   668  			y.prev = x
   669  			sgp.next = nil
   670  			sgp.prev = nil
   671  			return
   672  		}
   673  		// end of queue
   674  		x.next = nil
   675  		q.last = x
   676  		sgp.prev = nil
   677  		return
   678  	}
   679  	if y != nil {
   680  		// start of queue
   681  		y.prev = nil
   682  		q.first = y
   683  		sgp.next = nil
   684  		return
   685  	}
   686  
   687  	// x==y==nil. Either sgp is the only element in the queue,
   688  	// or it has already been removed. Use q.first to disambiguate.
   689  	if q.first == sgp {
   690  		q.first = nil
   691  		q.last = nil
   692  	}
   693  }