github.com/brownsys/tracing-framework-go@v0.0.0-20161210174012-0542a62412fe/go/darwin_amd64/src/runtime/netpoll.go (about)

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// func netpollinit()			// to initialize the poller
// func netpollopen(fd uintptr, pd *pollDesc) int32	// to arm edge-triggered notifications
//							// and associate fd with pd
// An implementation must call the following function to denote that the pd is ready.
// func netpollready(gpp *guintptr, pd *pollDesc, mode int32)

// pollDesc contains two binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. A semaphore can be in the following states:
// pdReady - an IO readiness notification is pending;
//           a goroutine consumes the notification by changing the state to nil.
// pdWait - a goroutine prepares to park on the semaphore but is not yet parked;
//          the goroutine commits to park by changing the state to a G pointer,
//          or, alternatively, a concurrent IO notification changes the state to pdReady,
//          or, alternatively, a concurrent timeout/close changes the state to nil.
// G pointer - the goroutine is blocked on the semaphore;
//             an IO notification or a timeout/close changes the state to pdReady or nil,
//             respectively, and unparks the goroutine.
// nil - none of the above.
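//
// As a rough sketch (derived from the states above and the CAS sites in
// netpollblock, netpollblockcommit, and netpollunblock below), the legal
// transitions are:
//
//	nil     -> pdWait     netpollblock: a goroutine prepares to park
//	pdWait  -> G pointer  netpollblockcommit: the goroutine commits and parks
//	pdWait  -> pdReady    netpollunblock(ioready=true): readiness won the race
//	pdWait  -> nil        netpollunblock(ioready=false): timeout/close won
//	G ptr   -> pdReady    netpollunblock(ioready=true): unpark on IO readiness
//	G ptr   -> nil        netpollunblock(ioready=false): unpark on timeout/close
//	pdReady -> nil        netpollblock or pollReset consumes the notification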
const (
	pdReady uintptr = 1
	pdWait  uintptr = 2
)

const pollBlockSize = 4 * 1024

// Network poller descriptor.
type pollDesc struct {
	link *pollDesc // in pollcache, protected by pollcache.lock

	// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
	// This fully covers the seq, rt and wt variables. fd is constant throughout the pollDesc lifetime.
	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
	// proceed w/o taking the lock, so closing, rg, rd, wg and wd are manipulated
	// in a lock-free way by all operations.
	// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
	// which will blow up when the GC starts moving objects.
	lock    mutex // protects the following fields
	fd      uintptr
	closing bool
	seq     uintptr // protects against stale timers and ready notifications
	rg      uintptr // pdReady, pdWait, G waiting for read, or nil
	rt      timer   // read deadline timer (set if rt.f != nil)
	rd      int64   // read deadline
	wg      uintptr // pdReady, pdWait, G waiting for write, or nil
	wt      timer   // write deadline timer
	wd      int64   // write deadline
	user    uint32  // user-settable cookie
}

type pollCache struct {
	lock  mutex
	first *pollDesc
	// pollDesc objects must be type-stable,
	// because we can get a ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using the seq field,
	// which is incremented when deadlines are changed or the descriptor is reused.
}

var (
	netpollInited uint32
	pollcache     pollCache
)

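// net_runtime_pollServerInit is called by the net package (through the
// go:linkname directive below) to initialize the platform-specific poller.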
//go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
func net_runtime_pollServerInit() {
	netpollinit()
	atomic.Store(&netpollInited, 1)
}

func netpollinited() bool {
	return atomic.Load(&netpollInited) != 0
}

//go:linkname net_runtime_pollOpen net.runtime_pollOpen
func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	pd := pollcache.alloc()
	lock(&pd.lock)
	if pd.wg != 0 && pd.wg != pdReady {
		throw("netpollOpen: blocked write on free descriptor")
	}
	if pd.rg != 0 && pd.rg != pdReady {
		throw("netpollOpen: blocked read on free descriptor")
	}
	pd.fd = fd
	pd.closing = false
	pd.seq++
	pd.rg = 0
	pd.rd = 0
	pd.wg = 0
	pd.wd = 0
	unlock(&pd.lock)

	errno := netpollopen(fd, pd)
	return pd, int(errno)
}
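
// Taken together, the linknamed entry points give the net package a
// descriptor lifecycle roughly like the following sketch (illustrative only;
// the real call sequence lives on the net side of these linknames):
//
//	pd, errno := runtime_pollOpen(fd) // register fd, arm edge-triggered notifications
//	...
//	runtime_pollReset(pd, 'r')        // consume any stale notification before waiting
//	runtime_pollWait(pd, 'r')         // park until readable, deadline, or close
//	...
//	runtime_pollUnblock(pd)           // mark closing and wake all waiters
//	runtime_pollClose(pd)             // unregister fd and recycle pd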

//go:linkname net_runtime_pollClose net.runtime_pollClose
func net_runtime_pollClose(pd *pollDesc) {
	if !pd.closing {
		throw("netpollClose: close w/o unblock")
	}
	if pd.wg != 0 && pd.wg != pdReady {
		throw("netpollClose: blocked write on closing descriptor")
	}
	if pd.rg != 0 && pd.rg != pdReady {
		throw("netpollClose: blocked read on closing descriptor")
	}
	netpollclose(pd.fd)
	pollcache.free(pd)
}

func (c *pollCache) free(pd *pollDesc) {
	lock(&c.lock)
	pd.link = c.first
	c.first = pd
	unlock(&c.lock)
}
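
// Note that free only pushes pd onto the cache's free list; the memory is
// never returned to the heap, which keeps pollDesc objects type-stable (see
// the comment on pollCache).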
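
// net_runtime_pollReset clears any pending ready notification for the given
// mode ('r' or 'w') so the descriptor can be waited on again. Like pollWait,
// it reports netpollcheckerr's code: 0 on success, 1 if the descriptor is
// closing, 2 if its deadline has expired.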
//go:linkname net_runtime_pollReset net.runtime_pollReset
func net_runtime_pollReset(pd *pollDesc, mode int) int {
	err := netpollcheckerr(pd, int32(mode))
	if err != 0 {
		return err
	}
	if mode == 'r' {
		pd.rg = 0
	} else if mode == 'w' {
		pd.wg = 0
	}
	return 0
}

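// net_runtime_pollWait parks the calling goroutine until the descriptor is
// ready for the given mode, its deadline expires, or it is closed, and
// returns the corresponding netpollcheckerr code.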
//go:linkname net_runtime_pollWait net.runtime_pollWait
func net_runtime_pollWait(pd *pollDesc, mode int) int {
	err := netpollcheckerr(pd, int32(mode))
	if err != 0 {
		return err
	}
	// For now, only Solaris uses level-triggered IO.
	if GOOS == "solaris" {
		netpollarm(pd, mode)
	}
	for !netpollblock(pd, int32(mode), false) {
		err = netpollcheckerr(pd, int32(mode))
		if err != 0 {
			return err
		}
		// This can happen if the timeout fired and unblocked us,
		// but the timeout was then reset before we had a chance to run.
		// Pretend it did not happen and retry.
	}
	return 0
}

//go:linkname net_runtime_pollWaitCanceled net.runtime_pollWaitCanceled
func net_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
	// This function is used only on Windows, after a failed attempt to cancel
	// a pending async IO operation. Wait for ioready; ignore closing or timeouts.
	for !netpollblock(pd, int32(mode), true) {
	}
}

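// net_runtime_pollSetDeadline installs a read and/or write deadline on pd.
// d is an absolute nanotime value; d == 0 means no deadline, and a deadline
// already in the past is stored as -1, which immediately unblocks any pending
// IO and makes subsequent waits fail with the timeout error code.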
//go:linkname net_runtime_pollSetDeadline net.runtime_pollSetDeadline
func net_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
	lock(&pd.lock)
	if pd.closing {
		unlock(&pd.lock)
		return
	}
	pd.seq++ // invalidate current timers
	// Reset current timers.
	if pd.rt.f != nil {
		deltimer(&pd.rt)
		pd.rt.f = nil
	}
	if pd.wt.f != nil {
		deltimer(&pd.wt)
		pd.wt.f = nil
	}
	// Set up new timers.
	if d != 0 && d <= nanotime() {
		d = -1
	}
	if mode == 'r' || mode == 'r'+'w' {
		pd.rd = d
	}
	if mode == 'w' || mode == 'r'+'w' {
		pd.wd = d
	}
	if pd.rd > 0 && pd.rd == pd.wd {
		pd.rt.f = netpollDeadline
		pd.rt.when = pd.rd
		// Copy the current seq into the timer arg.
		// The timer func will check the seq against the current descriptor seq;
		// if they differ, the descriptor was reused or the timers were reset.
		pd.rt.arg = pd
		pd.rt.seq = pd.seq
		addtimer(&pd.rt)
	} else {
		if pd.rd > 0 {
			pd.rt.f = netpollReadDeadline
			pd.rt.when = pd.rd
			pd.rt.arg = pd
			pd.rt.seq = pd.seq
			addtimer(&pd.rt)
		}
		if pd.wd > 0 {
			pd.wt.f = netpollWriteDeadline
			pd.wt.when = pd.wd
			pd.wt.arg = pd
			pd.wt.seq = pd.seq
			addtimer(&pd.wt)
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	var rg, wg *g
	atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
	if pd.rd < 0 {
		rg = netpollunblock(pd, 'r', false)
	}
	if pd.wd < 0 {
		wg = netpollunblock(pd, 'w', false)
	}
	unlock(&pd.lock)
	if rg != nil {
		goready(rg, 3)
	}
	if wg != nil {
		goready(wg, 3)
	}
}
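
// When the read and write deadlines are equal, a single timer (rt, running
// netpollDeadline) serves both directions and wt is left unused; this is why
// netpolldeadlineimpl below tolerates wt.f == nil when read is also being
// fired.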

//go:linkname net_runtime_pollUnblock net.runtime_pollUnblock
func net_runtime_pollUnblock(pd *pollDesc) {
	lock(&pd.lock)
	if pd.closing {
		throw("netpollUnblock: already closing")
	}
	pd.closing = true
	pd.seq++
	var rg, wg *g
	atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
	rg = netpollunblock(pd, 'r', false)
	wg = netpollunblock(pd, 'w', false)
	if pd.rt.f != nil {
		deltimer(&pd.rt)
		pd.rt.f = nil
	}
	if pd.wt.f != nil {
		deltimer(&pd.wt)
		pd.wt.f = nil
	}
	unlock(&pd.lock)
	if rg != nil {
		goready(rg, 3)
	}
	if wg != nil {
		goready(wg, 3)
	}
}

// netpollready makes pd ready; newly runnable goroutines (if any) are
// prepended to the list pointed to by gpp.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func netpollready(gpp *guintptr, pd *pollDesc, mode int32) {
	var rg, wg guintptr
	if mode == 'r' || mode == 'r'+'w' {
		rg.set(netpollunblock(pd, 'r', true))
	}
	if mode == 'w' || mode == 'r'+'w' {
		wg.set(netpollunblock(pd, 'w', true))
	}
	if rg != 0 {
		rg.ptr().schedlink = *gpp
		*gpp = rg
	}
	if wg != 0 {
		wg.ptr().schedlink = *gpp
		*gpp = wg
	}
}
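
// netpollready is invoked by the platform-specific netpoll implementations
// while walking ready events; the woken goroutines are chained through their
// schedlink fields into the list at gpp for the caller to make runnable.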

func netpollcheckerr(pd *pollDesc, mode int32) int {
	if pd.closing {
		return 1 // errClosing
	}
	if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
		return 2 // errTimeout
	}
	return 0
}

func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
	return atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
}
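
// netpollblockcommit is the gopark unlock function used by netpollblock: it
// publishes the parking goroutine by swinging the semaphore from pdWait to
// the g pointer. If the CAS fails, a concurrent readiness, timeout, or close
// notification already replaced pdWait, and gopark returns without blocking.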

// netpollblock returns true if IO is ready, or false if it timed out or was closed.
// waitio - wait only for completed IO, ignore errors
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// set the gpp semaphore to pdWait
	for {
		old := *gpp
		if old == pdReady {
			*gpp = 0
			return true
		}
		if old != 0 {
			throw("netpollblock: double wait")
		}
		if atomic.Casuintptr(gpp, 0, pdWait) {
			break
		}
	}

	// need to recheck error states after setting gpp to pdWait
	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
	if waitio || netpollcheckerr(pd, mode) == 0 {
		gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet, 5)
	}
	// be careful to not lose a concurrent pdReady notification
	old := atomic.Xchguintptr(gpp, 0)
	if old > pdWait {
		throw("netpollblock: corrupted state")
	}
	return old == pdReady
}
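
// The closing Xchg (rather than a plain load and store) is what prevents a
// pdReady notification that arrives between wakeup and cleanup from being
// lost: whatever value the semaphore held is consumed atomically, and the
// semaphore is left at zero for the next pollReset/pollWait cycle.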

func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	for {
		old := *gpp
		if old == pdReady {
			return nil
		}
		if old == 0 && !ioready {
			// Only set pdReady for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil
		}
		var new uintptr
		if ioready {
			new = pdReady
		}
		if atomic.Casuintptr(gpp, old, new) {
			if old == pdReady || old == pdWait {
				old = 0
			}
			return (*g)(unsafe.Pointer(old))
		}
	}
}
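
// netpollunblock only returns the goroutine to wake; callers wake it
// themselves, either via goready after dropping pd.lock or by queuing it in
// netpollready. Because the CAS swings the semaphore away from the G pointer
// exactly once, each parked goroutine is returned (and woken) at most once.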

func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
	lock(&pd.lock)
	// The seq arg is the descriptor's seq at the time the timer was set.
	// If it is stale, ignore the timer event.
	if seq != pd.seq {
		// The descriptor was reused or the timers were reset.
		unlock(&pd.lock)
		return
	}
	var rg *g
	if read {
		if pd.rd <= 0 || pd.rt.f == nil {
			throw("netpolldeadlineimpl: inconsistent read deadline")
		}
		pd.rd = -1
		atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
		rg = netpollunblock(pd, 'r', false)
	}
	var wg *g
	if write {
		if pd.wd <= 0 || (pd.wt.f == nil && !read) {
			throw("netpolldeadlineimpl: inconsistent write deadline")
		}
		pd.wd = -1
		atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
		wg = netpollunblock(pd, 'w', false)
	}
	unlock(&pd.lock)
	if rg != nil {
		goready(rg, 0)
	}
	if wg != nil {
		goready(wg, 0)
	}
}

func netpollDeadline(arg interface{}, seq uintptr) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
}

func netpollReadDeadline(arg interface{}, seq uintptr) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
}

func netpollWriteDeadline(arg interface{}, seq uintptr) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}

func (c *pollCache) alloc() *pollDesc {
	lock(&c.lock)
	if c.first == nil {
		const pdSize = unsafe.Sizeof(pollDesc{})
		n := pollBlockSize / pdSize
		if n == 0 {
			n = 1
		}
		// Must be in non-GC memory because it can be referenced
		// only from epoll/kqueue internals.
		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
		for i := uintptr(0); i < n; i++ {
			pd := (*pollDesc)(add(mem, i*pdSize))
			pd.link = c.first
			c.first = pd
		}
	}
	pd := c.first
	c.first = pd.link
	unlock(&c.lock)
	return pd
}