github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/runtime/netpoll_solaris.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "github.com/x04/go/src/unsafe"

// Solaris runtime-integrated network poller.
//
// Solaris uses event ports for scalable network I/O. Event
// ports are level-triggered, unlike epoll and kqueue which
// can be configured in both level-triggered and edge-triggered
// mode. Level triggering means we have to keep track of a few things
// ourselves. After we receive an event for a file descriptor,
// it's our responsibility to ask again to be notified for future
// events for that descriptor. When doing this we must keep track of
// what kind of events the goroutines are currently interested in,
// for example an fd may be open both for reading and writing.
//
// A description of the high level operation of this code
// follows. Networking code will get a file descriptor by some means
// and will register it with the netpolling mechanism by a code path
// that eventually calls runtime·netpollopen. runtime·netpollopen
// calls port_associate with an empty event set. That means that we
// will not receive any events at this point. The association needs
// to be done at this early point because we need to process the I/O
// readiness notification at some point in the future. If I/O becomes
// ready while nobody is listening, then by the time we finally care
// about it, nobody will tell us anymore.
//
// Besides calling runtime·netpollopen, the networking code paths
// will call runtime·netpollarm each time goroutines are interested
// in doing network I/O. Because now we know what kind of I/O we
// are interested in (reading/writing), we can call port_associate
// passing the correct type of event set (POLLIN/POLLOUT). As we made
// sure to have already associated the file descriptor with the port,
// when we now call port_associate, we will unblock the main poller
// loop (in runtime·netpoll) right away if the socket is actually
// ready for I/O.
//
// The main poller loop runs in its own thread waiting for events
// using port_getn. When an event happens, it will tell the scheduler
// about it using runtime·netpollready. Besides doing this, it must
// also re-associate the events that were not part of this current
// notification with the file descriptor. Failing to do this would
// mean each notification would prevent concurrent code from using the
// same file descriptor in parallel.
//
// The logic dealing with re-associations is encapsulated in
// runtime·netpollupdate. This function takes care to associate the
// descriptor only with the subset of events that were previously
// part of the association, except the one that just happened. We
// can't re-associate with that right away, because event ports
// are level-triggered, so it would cause a busy loop. Instead, that
// association is effected only by the runtime·netpollarm code path,
// when Go code actually asks for I/O.
//
// The open and arming mechanisms are serialized using the lock
// inside PollDesc. This is required because the netpoll loop runs
// asynchronously with respect to other Go code and by the time we get
// to call port_associate to update the association in the loop, the
// file descriptor might have been closed and reopened already. The
// lock allows runtime·netpollupdate to be called synchronously from
// the loop thread while preventing other threads from operating on the
// same PollDesc, so once we unblock in the main loop, until we loop
// again we know for sure we are always talking about the same file
// descriptor and can safely access the data we want (the event set).
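//
// In rough pseudo-Go (illustration only; the real call sites live elsewhere
// in the runtime), the life of a single descriptor through this file looks
// roughly like:
//
//	netpollopen(fd, pd)           // port_associate with an empty event set
//	netpollarm(pd, 'r')           // port_associate with _POLLIN: a goroutine wants to read
//	                              // ... port_getn in netpoll returns a _POLLIN event for fd ...
//	netpollupdate(pd, 0, _POLLIN) // re-associate with whatever interest remains (e.g. _POLLOUT)
//	netpollready(&toRun, pd, 'r') // hand the waiting goroutine(s) back to the scheduler
//
// The names and ordering above are taken from the functions defined below.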

//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
//go:cgo_import_dynamic libc_port_getn port_getn "libc.so"
//go:cgo_import_dynamic libc_port_alert port_alert "libc.so"

//go:linkname libc_port_create libc_port_create
//go:linkname libc_port_associate libc_port_associate
//go:linkname libc_port_dissociate libc_port_dissociate
//go:linkname libc_port_getn libc_port_getn
//go:linkname libc_port_alert libc_port_alert

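// Roughly speaking, the //go:cgo_import_dynamic directives above tell the
// linker to resolve these symbols from libc.so at load time; each libcFunc
// variable below then holds the address of the corresponding C function,
// and the sysvicallN helpers dispatch through it.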
var (
	libc_port_create,
	libc_port_associate,
	libc_port_dissociate,
	libc_port_getn,
	libc_port_alert libcFunc
)

func errno() int32 {
	return *getg().m.perrno
}

func fcntl(fd, cmd, arg int32) int32 {
	return int32(sysvicall3(&libc_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg)))
}

func port_create() int32 {
	return int32(sysvicall0(&libc_port_create))
}

func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {
	return int32(sysvicall5(&libc_port_associate, uintptr(port), uintptr(source), object, uintptr(events), user))
}

func port_dissociate(port, source int32, object uintptr) int32 {
	return int32(sysvicall3(&libc_port_dissociate, uintptr(port), uintptr(source), object))
}

func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32 {
	return int32(sysvicall5(&libc_port_getn, uintptr(port), uintptr(unsafe.Pointer(evs)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout))))
}

func port_alert(port int32, flags, events uint32, user uintptr) int32 {
	return int32(sysvicall4(&libc_port_alert, uintptr(port), uintptr(flags), uintptr(events), user))
}

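// portfd is the event port shared by the whole runtime. It is created once in
// netpollinit and, barring initialization failure, is never closed.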
var portfd int32 = -1

func netpollinit() {
	portfd = port_create()
	if portfd >= 0 {
		fcntl(portfd, _F_SETFD, _FD_CLOEXEC)
		return
	}

	print("runtime: port_create failed (errno=", errno(), ")\n")
	throw("runtime: netpollinit failed")
}

func netpollIsPollDescriptor(fd uintptr) bool {
	return fd == uintptr(portfd)
}

func netpollopen(fd uintptr, pd *pollDesc) int32 {
	lock(&pd.lock)
	// We don't register for any specific type of events yet; that's
	// netpollarm's job. We merely ensure we call port_associate before
	// asynchronous connect/accept completes, so when we actually want
	// to do any I/O, the call to port_associate (from netpollarm,
	// with the interested event set) will unblock port_getn right away
	// because of the I/O readiness notification.
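	// pd.user caches the event set currently associated with the port for
	// this fd (see netpollupdate); start with no interest registered.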
	pd.user = 0
	r := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))
	unlock(&pd.lock)
	return r
}

func netpollclose(fd uintptr) int32 {
	return port_dissociate(portfd, _PORT_SOURCE_FD, fd)
}

// Updates the association with a new set of interested events. After
// this call, port_getn will return one and only one event for that
// particular descriptor, so this function needs to be called again.
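//
// For example (values purely illustrative): if pd.user is _POLLIN|_POLLOUT
// and a _POLLIN notification was just consumed, the poller loop calls
// netpollupdate(pd, 0, _POLLIN), leaving the descriptor associated with
// _POLLOUT only; the read side is re-armed later by netpollarm when a
// goroutine actually waits to read again.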
func netpollupdate(pd *pollDesc, set, clear uint32) {
	if pd.closing {
		return
	}

	old := pd.user
	events := (old & ^clear) | set
	if old == events {
		return
	}

	if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
		print("runtime: port_associate failed (errno=", errno(), ")\n")
		throw("runtime: netpollupdate failed")
	}
	pd.user = events
}

// subscribe the fd to the port such that port_getn will return one event.
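// mode is 'r' or 'w', matching the mode values used by netpollready; in the
// common path this runs just before a goroutine parks waiting for I/O on pd.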
func netpollarm(pd *pollDesc, mode int) {
	lock(&pd.lock)
	switch mode {
	case 'r':
		netpollupdate(pd, _POLLIN, 0)
	case 'w':
		netpollupdate(pd, _POLLOUT, 0)
	default:
		throw("runtime: bad mode")
	}
	unlock(&pd.lock)
}

// netpollBreak interrupts a port_getn wait.
func netpollBreak() {
	// Use port_alert to put portfd into alert mode.
	// This will wake up all threads sleeping in port_getn on portfd,
	// and cause their calls to port_getn to return immediately.
	// Further, until portfd is taken out of alert mode,
	// all calls to port_getn will return immediately.
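	// A blocking netpoll takes portfd back out of alert mode once it has
	// observed the wakeup (see the _PORT_SOURCE_ALERT case below).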
	if port_alert(portfd, _PORT_ALERT_UPDATE, _POLLHUP, uintptr(unsafe.Pointer(&portfd))) < 0 {
		if e := errno(); e != _EBUSY {
			println("runtime: port_alert failed with", e)
			throw("runtime: netpoll: port_alert failed")
		}
	}
}

// netpoll checks for ready network connections.
// Returns list of goroutines that become runnable.
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
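// For example (the call sites live elsewhere in the runtime): opportunistic
// polls pass 0, while a parking M may pass a positive delay derived from the
// next timer, or -1 to block until an event or a netpollBreak arrives.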
func netpoll(delay int64) gList {
	if portfd == -1 {
		return gList{}
	}

	var wait *timespec
	var ts timespec
	if delay < 0 {
		wait = nil
	} else if delay == 0 {
		wait = &ts
	} else {
		ts.setNsec(delay)
		if ts.tv_sec > 1e6 {
			// An arbitrary cap on how long to wait for a timer.
			// 1e6 s == ~11.5 days.
			ts.tv_sec = 1e6
		}
		wait = &ts
	}

	var events [128]portevent
retry:
	var n uint32 = 1
	r := port_getn(portfd, &events[0], uint32(len(events)), &n, wait)
	e := errno()
	if r < 0 && e == _ETIME && n > 0 {
		// As per port_getn(3C), an ETIME failure does not preclude the
		// delivery of some number of events.  Treat a timeout failure
		// with delivered events as a success.
		r = 0
	}
	if r < 0 {
		if e != _EINTR && e != _ETIME {
			print("runtime: port_getn on fd ", portfd, " failed (errno=", e, ")\n")
			throw("runtime: netpoll failed")
		}
		// If a timed sleep was interrupted and there are no events,
		// just return to recalculate how long we should sleep now.
		if delay > 0 {
			return gList{}
		}
		goto retry
	}

	var toRun gList
	for i := 0; i < int(n); i++ {
		ev := &events[i]

		if ev.portev_source == _PORT_SOURCE_ALERT {
			if ev.portev_events != _POLLHUP || unsafe.Pointer(ev.portev_user) != unsafe.Pointer(&portfd) {
				throw("runtime: netpoll: bad port_alert wakeup")
			}
			if delay != 0 {
				// Now that a blocking call to netpoll
				// has seen the alert, take portfd
				// back out of alert mode.
				// See the comment in netpollBreak.
				if port_alert(portfd, 0, 0, 0) < 0 {
					e := errno()
					println("runtime: port_alert failed with", e)
					throw("runtime: netpoll: port_alert failed")
				}
			}
			continue
		}

		if ev.portev_events == 0 {
			continue
		}
		pd := (*pollDesc)(unsafe.Pointer(ev.portev_user))

		var mode, clear int32
		if (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {
			mode += 'r'
			clear |= _POLLIN
		}
		if (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {
			mode += 'w'
			clear |= _POLLOUT
		}
		// To effect edge-triggered events, we need to be sure to
		// update our association with whatever events were not
		// set with this notification. For example, if we are registered
		// for POLLIN|POLLOUT, and we get POLLIN, besides waking
		// the goroutine interested in POLLIN we must not forget
		// about the one interested in POLLOUT.
		if clear != 0 {
			lock(&pd.lock)
			netpollupdate(pd, 0, uint32(clear))
			unlock(&pd.lock)
		}

		if mode != 0 {
			// TODO(mikio): Consider implementing event
			// scanning error reporting once we are sure
			// about the event port on SmartOS.
			//
			// See golang.org/x/issue/30840.
			netpollready(&toRun, pd, mode)
		}
	}

	return toRun
}