github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/runtime/netpoll.goc

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin linux

package net

#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
#include "arch_GOARCH.h"
#include "malloc.h"

// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// void runtime·netpollinit(void);			// to initialize the poller
// int32 runtime·netpollopen(int32 fd, PollDesc *pd);	// to arm edge-triggered notifications
//							// and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
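//
// Lifecycle, as implemented below: the Go side obtains a PollDesc via
// runtime_pollOpen, waits for readiness in runtime_pollWait, and the platform
// poller hands completed descriptors back through runtime·netpollready;
// shutdown goes through runtime_pollUnblock (wake any waiters) and then
// runtime_pollClose (return the PollDesc to pollcache).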

#define READY ((G*)1)
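
// rg and wg in PollDesc act as binary semaphores with three states:
//	nil   - no goroutine is waiting and no notification is pending
//	READY - a readiness notification arrived before anyone started waiting
//	G*    - a goroutine is parked, waiting for readiness
// netpollblock consumes READY or parks; netpollunblock posts READY or returns
// the parked G so the caller can runtime·ready it outside the PollDesc lock.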

struct PollDesc
{
	PollDesc* link;	// in pollcache, protected by pollcache.Lock
	Lock;		// protects the following fields
	int32	fd;
	bool	closing;
	uintptr	seq;	// protects against stale timers and ready notifications
	G*	rg;	// G waiting for read or READY (binary semaphore)
	Timer	rt;	// read deadline timer (set if rt.fv != nil)
	int64	rd;	// read deadline
	G*	wg;	// the same for writes
	Timer	wt;
	int64	wd;
};

static struct
{
	Lock;
	PollDesc*	first;
	// PollDesc objects must be type-stable,
	// because we can get a ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using the seq field;
	// seq is incremented when deadlines are changed or the descriptor is reused.
} pollcache;

static void	netpollblock(PollDesc*, int32);
static G*	netpollunblock(PollDesc*, int32);
static void	deadline(int64, Eface);
static void	readDeadline(int64, Eface);
static void	writeDeadline(int64, Eface);
static PollDesc*	allocPollDesc(void);
static intgo	checkerr(PollDesc *pd, int32 mode);

static FuncVal deadlineFn	= {(void(*)(void))deadline};
static FuncVal readDeadlineFn	= {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn	= {(void(*)(void))writeDeadline};

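// The func stanzas below are written in the runtime's .goc form: each compiles
// to a Go-visible function in package net. They are presumably matched by
// corresponding runtime_poll* declarations on the Go side of the net package;
// the exact Go-side signatures are not shown in this file.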
func runtime_pollServerInit() {
	runtime·netpollinit();
}

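// runtime_pollOpen takes a PollDesc from pollcache, resets its state, and
// registers fd with the platform poller. The throws below guard an invariant:
// a descriptor coming off the free list must not still have a parked reader
// or writer attached.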
func runtime_pollOpen(fd int) (pd *PollDesc, errno int) {
	pd = allocPollDesc();
	runtime·lock(pd);
	if(pd->wg != nil && pd->wg != READY)
		runtime·throw("runtime_pollOpen: blocked write on free descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime·throw("runtime_pollOpen: blocked read on free descriptor");
	pd->fd = fd;
	pd->closing = false;
	pd->seq++;
	pd->rg = nil;
	pd->rd = 0;
	pd->wg = nil;
	pd->wd = 0;
	runtime·unlock(pd);

	errno = runtime·netpollopen(fd, pd);
}

func runtime_pollClose(pd *PollDesc) {
	if(!pd->closing)
		runtime·throw("runtime_pollClose: close w/o unblock");
	if(pd->wg != nil && pd->wg != READY)
		runtime·throw("runtime_pollClose: blocked write on closing descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime·throw("runtime_pollClose: blocked read on closing descriptor");
	runtime·netpollclose(pd->fd);
	runtime·lock(&pollcache);
	pd->link = pollcache.first;
	pollcache.first = pd;
	runtime·unlock(&pollcache);
}

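// runtime_pollReset clears the read or write semaphore before a new I/O
// attempt. The integer results returned from here and from runtime_pollWait
// appear to be interpreted by the Go-side caller as: 0 - ok, 1 - closing,
// 2 - deadline exceeded (see checkerr).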
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
	runtime·lock(pd);
	err = checkerr(pd, mode);
	if(err)
		goto ret;
	if(mode == 'r')
		pd->rg = nil;
	else if(mode == 'w')
		pd->wg = nil;
ret:
	runtime·unlock(pd);
}

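// runtime_pollWait parks the calling goroutine until the descriptor is ready
// for the given mode ('r' or 'w'). Errors are re-checked after netpollblock
// returns because the wakeup may have come from runtime_pollUnblock (close)
// or from a deadline timer rather than from actual readiness.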
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
	runtime·lock(pd);
	err = checkerr(pd, mode);
	if(err)
		goto ret;
	netpollblock(pd, mode);
	err = checkerr(pd, mode);
ret:
	runtime·unlock(pd);
}

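// runtime_pollSetDeadline records the new deadline(s) and re-arms the timers.
// mode is 'r', 'w', or 'r'+'w' for both. d == 0 clears the deadline; a deadline
// already in the past is stored as -1 so that checkerr reports a timeout
// immediately. The current seq is stored in the timer's arg.type so a
// late-firing timer can recognize that it is stale.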
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
	runtime·lock(pd);
	if(pd->closing)
		goto ret;
	pd->seq++;  // invalidate current timers
	// Reset current timers.
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// Set up new timers.
	if(d != 0 && d <= runtime·nanotime()) {
		d = -1;
	}
	if(mode == 'r' || mode == 'r'+'w')
		pd->rd = d;
	if(mode == 'w' || mode == 'r'+'w')
		pd->wd = d;
	if(pd->rd > 0 && pd->rd == pd->wd) {
		pd->rt.fv = &deadlineFn;
		pd->rt.when = pd->rd;
		// Copy current seq into the timer arg.
		// The timer func will check the seq against the current descriptor seq;
		// if they differ, the descriptor was reused or the timers were reset.
		pd->rt.arg.type = (Type*)pd->seq;
		pd->rt.arg.data = pd;
		runtime·addtimer(&pd->rt);
	} else {
		if(pd->rd > 0) {
			pd->rt.fv = &readDeadlineFn;
			pd->rt.when = pd->rd;
			pd->rt.arg.type = (Type*)pd->seq;
			pd->rt.arg.data = pd;
			runtime·addtimer(&pd->rt);
		}
		if(pd->wd > 0) {
			pd->wt.fv = &writeDeadlineFn;
			pd->wt.when = pd->wd;
			pd->wt.arg.type = (Type*)pd->seq;
			pd->wt.arg.data = pd;
			runtime·addtimer(&pd->wt);
		}
	}
ret:
	runtime·unlock(pd);
}

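// runtime_pollUnblock marks the descriptor as closing, wakes any parked
// readers and writers (they will then fail checkerr with errClosing), and
// tears down the deadline timers. runtime_pollClose insists that this has
// been called first ("close w/o unblock").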
func runtime_pollUnblock(pd *PollDesc) {
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing)
		runtime·throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	pd->seq++;
	rg = netpollunblock(pd, 'r');
	wg = netpollunblock(pd, 'w');
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	runtime·unlock(pd);
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}

// make pd ready; newly runnable goroutines (if any) are enqueued into the gpp list
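// netpollready itself only links the woken goroutines into *gpp via
// G.schedlink; scheduling them is left to the caller (presumably the
// platform-specific runtime·netpoll implementation).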
void
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
{
	G *rg, *wg;

	rg = wg = nil;
	runtime·lock(pd);
	if(mode == 'r' || mode == 'r'+'w')
		rg = netpollunblock(pd, 'r');
	if(mode == 'w' || mode == 'r'+'w')
		wg = netpollunblock(pd, 'w');
	runtime·unlock(pd);
	if(rg) {
		rg->schedlink = *gpp;
		*gpp = rg;
	}
	if(wg) {
		wg->schedlink = *gpp;
		*gpp = wg;
	}
}

static intgo
checkerr(PollDesc *pd, int32 mode)
{
	if(pd->closing)
		return 1;  // errClosing
	if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
		return 2;  // errTimeout
	return 0;
}

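// netpollblock waits for the descriptor to become ready in the given mode.
// If a READY notification is already pending it is consumed and the function
// returns immediately; otherwise the current goroutine is recorded in rg/wg
// and parked. runtime·park releases the PollDesc lock for the duration of the
// wait, and the lock is re-acquired before returning to the caller.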
static void
netpollblock(PollDesc *pd, int32 mode)
{
	G **gpp;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	if(*gpp == READY) {
		*gpp = nil;
		return;
	}
	if(*gpp != nil)
		runtime·throw("netpollblock: double wait");
	*gpp = g;
	runtime·park(runtime·unlock, &pd->Lock, "IO wait");
	runtime·lock(pd);
}

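// netpollunblock posts a readiness notification for the given mode. If a
// goroutine is parked on the descriptor it is detached and returned so the
// caller can runtime·ready it after dropping the PollDesc lock; if nobody is
// waiting, READY is recorded so the next netpollblock returns immediately.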
static G*
netpollunblock(PollDesc *pd, int32 mode)
{
	G **gpp, *old;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	if(*gpp == READY)
		return nil;
	if(*gpp == nil) {
		*gpp = READY;
		return nil;
	}
	old = *gpp;
	*gpp = nil;
	return old;
}

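// deadlineimpl is the body shared by the three timer callbacks below. It
// compares the seq captured when the timer was armed with the descriptor's
// current seq; on a mismatch the event is stale (the descriptor was reused or
// the deadlines were reset) and is ignored. Otherwise the affected deadline is
// set to -1, which makes checkerr report errTimeout, and any parked goroutine
// is woken. seq is pointer-sized, so the local below is declared uintptr to
// avoid truncating it on 64-bit systems.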
static void
deadlineimpl(int64 now, Eface arg, bool read, bool write)
{
	PollDesc *pd;
	uintptr seq;
	G *rg, *wg;

	USED(now);
	pd = (PollDesc*)arg.data;
	// This is the seq when the timer was set.
	// If it's stale, ignore the timer event.
	seq = (uintptr)arg.type;
	rg = wg = nil;
	runtime·lock(pd);
	if(seq != pd->seq) {
		// The descriptor was reused or timers were reset.
		runtime·unlock(pd);
		return;
	}
	if(read) {
		if(pd->rd <= 0 || pd->rt.fv == nil)
			runtime·throw("deadlineimpl: inconsistent read deadline");
		pd->rd = -1;
		pd->rt.fv = nil;
		rg = netpollunblock(pd, 'r');
	}
	if(write) {
		if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
			runtime·throw("deadlineimpl: inconsistent write deadline");
		pd->wd = -1;
		pd->wt.fv = nil;
		wg = netpollunblock(pd, 'w');
	}
	runtime·unlock(pd);
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}

static void
deadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, true);
}

static void
readDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, false);
}

static void
writeDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, false, true);
}

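// allocPollDesc pops a PollDesc from the pollcache free list, carving a new
// batch out of a page of SysAlloc'd memory when the list is empty. The memory
// is deliberately outside the garbage-collected heap: the only reference to a
// PollDesc may be the one stored inside epoll/kqueue, which the GC cannot see,
// and freed descriptors go back on the list rather than being released.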
static PollDesc*
allocPollDesc(void)
{
	PollDesc *pd;
	uint32 i, n;

	runtime·lock(&pollcache);
	if(pollcache.first == nil) {
		n = PageSize/sizeof(*pd);
		if(n == 0)
			n = 1;
		// Must be in non-GC memory because it can be referenced
		// only from epoll/kqueue internals.
		pd = runtime·SysAlloc(n*sizeof(*pd));
		for(i = 0; i < n; i++) {
			pd[i].link = pollcache.first;
			pollcache.first = &pd[i];
		}
	}
	pd = pollcache.first;
	pollcache.first = pd->link;
	runtime·unlock(&pollcache);
	return pd;
}