github.com/spotify/syslog-redirector-golang@v0.0.0-20140320174030-4859f03d829a/src/pkg/runtime/netpoll.goc (about)

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build darwin dragonfly freebsd linux netbsd openbsd windows
     6  
     7  package net
     8  
     9  #include "runtime.h"
    10  #include "defs_GOOS_GOARCH.h"
    11  #include "arch_GOARCH.h"
    12  #include "malloc.h"
    13  
    14  // Integrated network poller (platform-independent part).
    15  // A particular implementation (epoll/kqueue) must define the following functions:
    16  // void runtime·netpollinit(void);			// to initialize the poller
    17  // int32 runtime·netpollopen(uintptr fd, PollDesc *pd);	// to arm edge-triggered notifications
    18  							// and associate fd with pd.
    19  // An implementation must call the following function to denote that the pd is ready.
    20  // void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
    21  
// Sentinel stored in rg/wg meaning "IO became ready before anyone waited".
#define READY ((G*)1)

// Network poller descriptor: per-fd state shared between the Go net
// package stubs below and the platform poller (epoll/kqueue).
// PollDescs live in non-GC memory and are recycled via pollcache,
// so every field except link must be reinitialized on reuse
// (see runtime_pollOpen).
struct PollDesc
{
	PollDesc* link;	// in pollcache, protected by pollcache.Lock
	Lock;		// protects the following fields
	uintptr	fd;	// fd registered with the platform poller
	bool	closing;	// descriptor is being closed; new/parked IO fails with errClosing
	uintptr	seq;	// protects from stale timers and ready notifications
	G*	rg;	// G waiting for read or READY (binary semaphore)
	Timer	rt;	// read deadline timer (set if rt.fv != nil)
	int64	rd;	// read deadline (nanotime; -1 once expired)
	G*	wg;	// the same for writes
	Timer	wt;	// write deadline timer (set if wt.fv != nil)
	int64	wd;	// write deadline (nanotime; -1 once expired)
};
    38  
// Global free list of PollDescs; refilled a page at a time by allocPollDesc.
static struct
{
	Lock;
	PollDesc*	first;	// singly-linked free list threaded through PollDesc.link
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using seq variable,
	// seq is incremented when deadlines are changed or descriptor is reused.
} pollcache;
    49  
// Forward declarations for the static helpers defined below.
static bool	netpollblock(PollDesc*, int32);
static G*	netpollunblock(PollDesc*, int32, bool);
static void	deadline(int64, Eface);
static void	readDeadline(int64, Eface);
static void	writeDeadline(int64, Eface);
static PollDesc*	allocPollDesc(void);
static intgo	checkerr(PollDesc *pd, int32 mode);

// FuncVal wrappers handed to addtimer as the timer callback (Timer.fv).
static FuncVal deadlineFn	= {(void(*)(void))deadline};
static FuncVal readDeadlineFn	= {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn	= {(void(*)(void))writeDeadline};
    61  
// One-time initialization of the platform-specific poller
// (runtime·netpollinit is supplied by the epoll/kqueue implementation).
func runtime_pollServerInit() {
	runtime·netpollinit();
}
    65  
// Takes a PollDesc off the cache, reinitializes it for fd, and registers
// fd with the platform poller. errno is runtime·netpollopen's result
// (0 on success, OS error code otherwise).
func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
	pd = allocPollDesc();
	runtime·lock(pd);
	// A descriptor coming off the free list must not have a parked G;
	// that would mean it was freed while IO was still blocked on it.
	if(pd->wg != nil && pd->wg != READY)
		runtime·throw("runtime_pollOpen: blocked write on free descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime·throw("runtime_pollOpen: blocked read on free descriptor");
	pd->fd = fd;
	pd->closing = false;
	pd->seq++;	// invalidate stale timers/notifications from a previous use
	pd->rg = nil;
	pd->rd = 0;
	pd->wg = nil;
	pd->wd = 0;
	runtime·unlock(pd);

	errno = runtime·netpollopen(fd, pd);
}
    84  
// Unregisters pd's fd from the poller and returns pd to pollcache.
// Must be preceded by runtime_pollUnblock (which sets closing and wakes
// all waiters), so no G can still be parked on pd here.
func runtime_pollClose(pd *PollDesc) {
	if(!pd->closing)
		runtime·throw("runtime_pollClose: close w/o unblock");
	if(pd->wg != nil && pd->wg != READY)
		runtime·throw("runtime_pollClose: blocked write on closing descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime·throw("runtime_pollClose: blocked read on closing descriptor");
	runtime·netpollclose(pd->fd);
	// Push pd back on the free list; it is recycled, never freed
	// (see the type-stability comment on pollcache).
	runtime·lock(&pollcache);
	pd->link = pollcache.first;
	pollcache.first = pd;
	runtime·unlock(&pollcache);
}
    98  
// Prepares pd for a new read ('r') or write ('w') operation by clearing
// any leftover READY notification in the corresponding semaphore.
// err: 0 ok, 1 closing, 2 deadline already expired (see checkerr).
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
	runtime·lock(pd);
	err = checkerr(pd, mode);
	if(err)
		goto ret;
	if(mode == 'r')
		pd->rg = nil;
	else if(mode == 'w')
		pd->wg = nil;
ret:
	runtime·unlock(pd);
}
   111  
// Blocks the calling goroutine until the fd is ready for mode ('r' or
// 'w'), the deadline expires, or the descriptor is closed.
// err: 0 IO ready, 1 closing, 2 timed out (see checkerr).
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
	runtime·lock(pd);
	err = checkerr(pd, mode);
	if(err == 0) {
		// netpollblock returns false on a non-IO wakeup; recheck why.
		while(!netpollblock(pd, mode)) {
			err = checkerr(pd, mode);
			if(err != 0)
				break;
			// Can happen if timeout has fired and unblocked us,
			// but before we had a chance to run, timeout has been reset.
			// Pretend it has not happened and retry.
		}
	}
	runtime·unlock(pd);
}
   127  
// Waits until an ioready notification arrives for mode, deliberately
// ignoring closing and deadline wakeups.
// NOTE(review): presumably used to drain a canceled async operation
// (Windows IOCP) — confirm against the callers in package net.
func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
	runtime·lock(pd);
	// wait for ioready, ignore closing or timeouts.
	while(!netpollblock(pd, mode))
		;
	runtime·unlock(pd);
}
   135  
// Sets, replaces, or clears the read/write deadline(s) on pd.
// mode is 'r', 'w', or 'r'+'w' for both. d is an absolute nanotime;
// d == 0 clears the deadline, a d in the past becomes -1 ("already
// expired") and immediately unblocks pending IO with errTimeout.
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing) {
		// Closing wins; timers were/will be torn down by pollUnblock.
		runtime·unlock(pd);
		return;
	}
	pd->seq++;  // invalidate current timers
	// Reset current timers.
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// Setup new timers.
	if(d != 0 && d <= runtime·nanotime())
		d = -1;	// already in the past: mark expired, arm no timer
	if(mode == 'r' || mode == 'r'+'w')
		pd->rd = d;
	if(mode == 'w' || mode == 'r'+'w')
		pd->wd = d;
	if(pd->rd > 0 && pd->rd == pd->wd) {
		// Identical read and write deadlines: arm a single combined
		// timer (deadlineFn fires both directions).
		pd->rt.fv = &deadlineFn;
		pd->rt.when = pd->rd;
		// Copy current seq into the timer arg.
		// Timer func will check the seq against current descriptor seq,
		// if they differ the descriptor was reused or timers were reset.
		pd->rt.arg.type = (Type*)pd->seq;
		pd->rt.arg.data = pd;
		runtime·addtimer(&pd->rt);
	} else {
		if(pd->rd > 0) {
			pd->rt.fv = &readDeadlineFn;
			pd->rt.when = pd->rd;
			pd->rt.arg.type = (Type*)pd->seq;
			pd->rt.arg.data = pd;
			runtime·addtimer(&pd->rt);
		}
		if(pd->wd > 0) {
			pd->wt.fv = &writeDeadlineFn;
			pd->wt.when = pd->wd;
			pd->wt.arg.type = (Type*)pd->seq;
			pd->wt.arg.data = pd;
			runtime·addtimer(&pd->wt);
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	rg = nil;
	wg = nil;
	if(pd->rd < 0)
		rg = netpollunblock(pd, 'r', false);
	if(pd->wd < 0)
		wg = netpollunblock(pd, 'w', false);
	// ready() is called after dropping pd's lock.
	runtime·unlock(pd);
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}
   199  
// First step of closing: marks pd as closing, wakes any Gs parked on it
// (they will observe errClosing via checkerr), and stops both deadline
// timers. runtime_pollClose must follow to recycle pd.
func runtime_pollUnblock(pd *PollDesc) {
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing)
		runtime·throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	pd->seq++;	// invalidate in-flight timers/ready notifications
	rg = netpollunblock(pd, 'r', false);
	wg = netpollunblock(pd, 'w', false);
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// ready() is called after dropping pd's lock.
	runtime·unlock(pd);
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}
   224  
// Returns the fd associated with pd.
uintptr
runtime·netpollfd(PollDesc *pd)
{
	return pd->fd;
}
   230  
// make pd ready, newly runnable goroutines (if any) are enqueued into gpp list.
// Called by the platform poller when pd's fd becomes ready for mode
// ('r', 'w', or 'r'+'w'). Does not call runtime·ready itself: unblocked
// Gs are pushed onto *gpp via schedlink so the caller can schedule them
// in batch.
void
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
{
	G *rg, *wg;

	rg = wg = nil;
	runtime·lock(pd);
	if(mode == 'r' || mode == 'r'+'w')
		rg = netpollunblock(pd, 'r', true);
	if(mode == 'w' || mode == 'r'+'w')
		wg = netpollunblock(pd, 'w', true);
	runtime·unlock(pd);
	if(rg) {
		rg->schedlink = *gpp;
		*gpp = rg;
	}
	if(wg) {
		wg->schedlink = *gpp;
		*gpp = wg;
	}
}
   253  
// Reports pd's error state for an operation in mode ('r' or 'w'):
// 0 ok, 1 descriptor closing (errClosing), 2 deadline expired
// (errTimeout; rd/wd == -1 marks an expired deadline).
// Caller must hold pd's lock.
static intgo
checkerr(PollDesc *pd, int32 mode)
{
	if(pd->closing)
		return 1;  // errClosing
	if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
		return 2;  // errTimeout
	return 0;
}
   263  
   264  // returns true if IO is ready, or false if timedout or closed
   265  static bool
   266  netpollblock(PollDesc *pd, int32 mode)
   267  {
   268  	G **gpp;
   269  
   270  	gpp = &pd->rg;
   271  	if(mode == 'w')
   272  		gpp = &pd->wg;
   273  	if(*gpp == READY) {
   274  		*gpp = nil;
   275  		return true;
   276  	}
   277  	if(*gpp != nil)
   278  		runtime·throw("netpollblock: double wait");
   279  	*gpp = g;
   280  	runtime·park(runtime·unlock, &pd->Lock, "IO wait");
   281  	runtime·lock(pd);
   282  	if(g->param)
   283  		return true;
   284  	return false;
   285  }
   286  
// Wakes the G (if any) blocked on pd in mode; ioready distinguishes a
// real IO notification from a timeout/close wakeup. Returns the G to be
// made runnable by the caller (after dropping pd's lock), or nil.
// Caller must hold pd's lock.
static G*
netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
	G **gpp, *old;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	if(*gpp == READY)
		return nil;	// notification already pending; nothing to wake
	if(*gpp == nil) {
		// Only set READY for ioready. runtime_pollWait
		// will check for timeout/cancel before waiting.
		if(ioready)
			*gpp = READY;
		return nil;
	}
	old = *gpp;
	// pass unblock reason onto blocked g
	old->param = (void*)ioready;
	*gpp = nil;
	return old;
}
   310  
   311  static void
   312  deadlineimpl(int64 now, Eface arg, bool read, bool write)
   313  {
   314  	PollDesc *pd;
   315  	uint32 seq;
   316  	G *rg, *wg;
   317  
   318  	USED(now);
   319  	pd = (PollDesc*)arg.data;
   320  	// This is the seq when the timer was set.
   321  	// If it's stale, ignore the timer event.
   322  	seq = (uintptr)arg.type;
   323  	rg = wg = nil;
   324  	runtime·lock(pd);
   325  	if(seq != pd->seq) {
   326  		// The descriptor was reused or timers were reset.
   327  		runtime·unlock(pd);
   328  		return;
   329  	}
   330  	if(read) {
   331  		if(pd->rd <= 0 || pd->rt.fv == nil)
   332  			runtime·throw("deadlineimpl: inconsistent read deadline");
   333  		pd->rd = -1;
   334  		pd->rt.fv = nil;
   335  		rg = netpollunblock(pd, 'r', false);
   336  	}
   337  	if(write) {
   338  		if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
   339  			runtime·throw("deadlineimpl: inconsistent write deadline");
   340  		pd->wd = -1;
   341  		pd->wt.fv = nil;
   342  		wg = netpollunblock(pd, 'w', false);
   343  	}
   344  	runtime·unlock(pd);
   345  	if(rg)
   346  		runtime·ready(rg);
   347  	if(wg)
   348  		runtime·ready(wg);
   349  }
   350  
// Timer callbacks: dispatch to deadlineimpl with the affected direction(s).

// Fires a combined read+write deadline (single timer armed when rd == wd).
static void
deadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, true);
}

// Fires a read-only deadline.
static void
readDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, false);
}

// Fires a write-only deadline.
static void
writeDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, false, true);
}
   368  
// Pops a PollDesc from pollcache, refilling the cache with roughly one
// page worth of descriptors when it is empty. Descriptors are never
// freed, only recycled through the cache, which keeps them type-stable
// (see the comment on pollcache).
static PollDesc*
allocPollDesc(void)
{
	PollDesc *pd;
	uint32 i, n;

	runtime·lock(&pollcache);
	if(pollcache.first == nil) {
		n = PageSize/sizeof(*pd);
		if(n == 0)
			n = 1;	// in case a PollDesc ever exceeds a page
		// Must be in non-GC memory because can be referenced
		// only from epoll/kqueue internals.
		pd = runtime·persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
		for(i = 0; i < n; i++) {
			pd[i].link = pollcache.first;
			pollcache.first = &pd[i];
		}
	}
	pd = pollcache.first;
	pollcache.first = pd->link;
	runtime·unlock(&pollcache);
	return pd;
}