github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/libgo-noext.diff

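This patch removes libgo's reliance on unnamed (anonymous) struct fields, a GNU C extension (the -fplan9-extensions behavior, which also lets a pointer to the containing struct be passed where a pointer to the embedded type is expected). Every embedded Lock, Special, MCentral, SemaRoot, and Note field is given an explicit name, and every call site passes the address of the named field instead of the containing struct, hence "noext": the result is plain ISO C that compilers without the extension can build. A minimal stand-alone sketch of the pattern, using simplified stand-in types (the real Lock, Hchan, and runtime_lock live in the runtime sources patched below):

	/* before (extension):  struct Hchan { ...; Lock; };
	 *                      runtime_lock(c);           // c is a Hchan*
	 * after  (plain C):    struct Hchan { ...; Lock lock; };
	 *                      runtime_lock(&c->lock);
	 */
	#include <stdio.h>

	typedef struct Lock { int held; } Lock;

	static void runtime_lock(Lock *l)   { l->held = 1; }
	static void runtime_unlock(Lock *l) { l->held = 0; }

	typedef struct Hchan {
		Lock lock;	/* was the unnamed field "Lock;" */
		int closed;
	} Hchan;

	int main(void) {
		Hchan ch = { {0}, 0 };
		runtime_lock(&ch.lock);	/* the patched call-site shape */
		printf("held=%d\n", ch.lock.held);
		runtime_unlock(&ch.lock);
		return 0;
	}
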
diff -r bb70e852004f libgo/runtime/chan.goc
--- a/libgo/runtime/chan.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/chan.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -111,7 +111,7 @@
 		mysg.releasetime = -1;
 	}
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	if(c->closed)
 		goto closed;
 
@@ -120,7 +120,7 @@
 
 	sg = dequeue(&c->recvq);
 	if(sg != nil) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 
 		gp = sg->g;
 		gp->param = sg;
@@ -133,7 +133,7 @@
 	}
 
 	if(!block) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return false;
 	}
 
@@ -142,10 +142,10 @@
 	mysg.selectdone = nil;
 	g->param = nil;
 	enqueue(&c->sendq, &mysg);
-	runtime_parkunlock(c, "chan send");
+	runtime_parkunlock(&c->lock, "chan send");
 
 	if(g->param == nil) {
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		if(!c->closed)
 			runtime_throw("chansend: spurious wakeup");
 		goto closed;
@@ -162,16 +162,16 @@
 
 	if(c->qcount >= c->dataqsiz) {
 		if(!block) {
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			return false;
 		}
 		mysg.g = g;
 		mysg.elem = nil;
 		mysg.selectdone = nil;
 		enqueue(&c->sendq, &mysg);
-		runtime_parkunlock(c, "chan send");
+		runtime_parkunlock(&c->lock, "chan send");
 
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		goto asynch;
 	}
 
@@ -183,18 +183,18 @@
 	sg = dequeue(&c->recvq);
 	if(sg != nil) {
 		gp = sg->g;
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		if(sg->releasetime)
 			sg->releasetime = runtime_cputicks();
 		runtime_ready(gp);
 	} else
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 	if(mysg.releasetime > 0)
 		runtime_blockevent(mysg.releasetime - t0, 2);
 	return true;
 
 closed:
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	runtime_panicstring("send on closed channel");
 	return false;  // not reached
 }
@@ -232,7 +232,7 @@
 		mysg.releasetime = -1;
 	}
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	if(c->dataqsiz > 0)
 		goto asynch;
 
@@ -241,7 +241,7 @@
 
 	sg = dequeue(&c->sendq);
 	if(sg != nil) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 
 		if(ep != nil)
 			runtime_memmove(ep, sg->elem, c->elemsize);
@@ -257,7 +257,7 @@
 	}
 
 	if(!block) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return false;
 	}
 
@@ -266,10 +266,10 @@
 	mysg.selectdone = nil;
 	g->param = nil;
 	enqueue(&c->recvq, &mysg);
-	runtime_parkunlock(c, "chan receive");
+	runtime_parkunlock(&c->lock, "chan receive");
 
 	if(g->param == nil) {
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		if(!c->closed)
 			runtime_throw("chanrecv: spurious wakeup");
 		goto closed;
@@ -287,7 +287,7 @@
 			goto closed;
 
 		if(!block) {
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			if(received != nil)
 				*received = false;
 			return false;
@@ -296,9 +296,9 @@
 		mysg.elem = nil;
 		mysg.selectdone = nil;
 		enqueue(&c->recvq, &mysg);
-		runtime_parkunlock(c, "chan receive");
+		runtime_parkunlock(&c->lock, "chan receive");
 
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		goto asynch;
 	}
 
@@ -312,12 +312,12 @@
 	sg = dequeue(&c->sendq);
 	if(sg != nil) {
 		gp = sg->g;
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		if(sg->releasetime)
 			sg->releasetime = runtime_cputicks();
 		runtime_ready(gp);
 	} else
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 
 	if(received != nil)
 		*received = true;
@@ -330,7 +330,7 @@
 		runtime_memclr(ep, c->elemsize);
 	if(received != nil)
 		*received = false;
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	if(mysg.releasetime > 0)
 		runtime_blockevent(mysg.releasetime - t0, 2);
 	return true;
@@ -604,7 +604,7 @@
 		c0 = sel->lockorder[i];
 		if(c0 && c0 != c) {
 			c = sel->lockorder[i];
-			runtime_lock(c);
+			runtime_lock(&c->lock);
 		}
 	}
 }
@@ -632,7 +632,7 @@
 		c = sel->lockorder[i];
 		if(i>0 && sel->lockorder[i-1] == c)
 			continue;  // will unlock it on the next iteration
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 	}
 }
 
@@ -1017,9 +1017,9 @@
 	if(runtime_gcwaiting())
 		runtime_gosched();
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	if(c->closed) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		runtime_panicstring("close of closed channel");
 	}
 	c->closed = true;
@@ -1048,7 +1048,7 @@
 		runtime_ready(gp);
 	}
 
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 }
 
 void
diff -r bb70e852004f libgo/runtime/chan.h
--- a/libgo/runtime/chan.h	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/chan.h	Fri Apr 03 17:31:02 2015 -0700
@@ -39,7 +39,7 @@
 	uintgo	recvx;			// receive index
 	WaitQ	recvq;			// list of recv waiters
 	WaitQ	sendq;			// list of send waiters
-	Lock;
+	Lock	lock;
 };
 
 // Buffer follows Hchan immediately in memory.
diff -r bb70e852004f libgo/runtime/heapdump.c
--- a/libgo/runtime/heapdump.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/heapdump.c	Fri Apr 03 17:31:02 2015 -0700
@@ -387,7 +387,7 @@
 				if(sp->kind != KindSpecialFinalizer)
 					continue;
 				spf = (SpecialFinalizer*)sp;
-				p = (byte*)((s->start << PageShift) + spf->offset);
+				p = (byte*)((s->start << PageShift) + spf->special.offset);
 				dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
 			}
 		}
@@ -566,7 +566,7 @@
 			if(sp->kind != KindSpecialProfile)
 				continue;
 			spp = (SpecialProfile*)sp;
-			p = (byte*)((s->start << PageShift) + spp->offset);
+			p = (byte*)((s->start << PageShift) + spp->special.offset);
 			dumpint(TagAllocSample);
 			dumpint((uintptr)p);
 			dumpint((uintptr)spp->b);
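
The two heapdump.c hunks above are fallout from naming the embedded Special header in malloc.h (next file): spf->offset becomes spf->special.offset and, in mheap.c further down, addspecial(p, s) becomes addspecial(p, &s->special). A minimal sketch with stand-in types of how the named first member doubles as the explicit base-struct pointer:

	#include <stdio.h>

	typedef struct Special { unsigned short offset; unsigned char kind; } Special;
	typedef struct SpecialFinalizer {
		Special special;	/* was the unnamed field "Special;" */
		void *fn;
	} SpecialFinalizer;

	static void addspecial(Special *s) { s->kind = 1; }

	int main(void) {
		SpecialFinalizer f = { {42, 0}, 0 };
		addspecial(&f.special);	/* was addspecial(&f) via the extension */
		printf("offset=%u kind=%u\n", f.special.offset, f.special.kind);
		return 0;
	}
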
diff -r bb70e852004f libgo/runtime/malloc.goc
--- a/libgo/runtime/malloc.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/malloc.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -429,9 +429,9 @@
 	m->mcache->local_nlookup++;
 	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
 		// purge cache stats to prevent overflow
-		runtime_lock(&runtime_mheap);
+		runtime_lock(&runtime_mheap.lock);
 		runtime_purgecachedstats(m->mcache);
-		runtime_unlock(&runtime_mheap);
+		runtime_unlock(&runtime_mheap.lock);
 	}
 
 	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
@@ -728,7 +728,7 @@
 
 static struct
 {
-	Lock;
+	Lock	lock;
 	byte*	pos;
 	byte*	end;
 } persistent;
@@ -757,19 +757,19 @@
 		align = 8;
 	if(size >= PersistentAllocMaxBlock)
 		return runtime_SysAlloc(size, stat);
-	runtime_lock(&persistent);
+	runtime_lock(&persistent.lock);
 	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
 	if(persistent.pos + size > persistent.end) {
 		persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
 		if(persistent.pos == nil) {
-			runtime_unlock(&persistent);
+			runtime_unlock(&persistent.lock);
 			runtime_throw("runtime: cannot allocate memory");
 		}
 		persistent.end = persistent.pos + PersistentAllocChunk;
 	}
 	p = persistent.pos;
 	persistent.pos += size;
-	runtime_unlock(&persistent);
+	runtime_unlock(&persistent.lock);
 	if(stat != &mstats.other_sys) {
 		// reaccount the allocation against provided stat
 		runtime_xadd64(stat, size);
diff -r bb70e852004f libgo/runtime/malloc.h
--- a/libgo/runtime/malloc.h	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/malloc.h	Fri Apr 03 17:31:02 2015 -0700
@@ -390,7 +390,7 @@
 typedef struct SpecialFinalizer SpecialFinalizer;
 struct SpecialFinalizer
 {
-	Special;
+	Special		special;
 	FuncVal*	fn;
 	const FuncType*	ft;
 	const PtrType*	ot;
@@ -401,7 +401,7 @@
 typedef struct SpecialProfile SpecialProfile;
 struct SpecialProfile
 {
-	Special;
+	Special	special;
 	Bucket*	b;
 };
 
@@ -458,7 +458,7 @@
 // Central list of free objects of a given size.
 struct MCentral
 {
-	Lock;
+	Lock  lock;
 	int32 sizeclass;
 	MSpan nonempty;	// list of spans with a free object
 	MSpan empty;	// list of spans with no free objects (or cached in an MCache)
@@ -476,7 +476,7 @@
 // but all the other global data is here too.
 struct MHeap
 {
-	Lock;
+	Lock lock;
 	MSpan free[MaxMHeapList];	// free lists of given length
 	MSpan freelarge;		// free lists length >= MaxMHeapList
 	MSpan busy[MaxMHeapList];	// busy lists of large objects of given length
@@ -505,7 +505,7 @@
 	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
 	// gets its own cache line.
 	struct {
-		MCentral;
+		MCentral mcentral;
 		byte pad[64];
 	} central[NumSizeClasses];
 
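Naming the embedded MCentral in the padded wrapper above means call sites can no longer treat central[i] as an MCentral directly; the mcache.c, mgc0.c, and mheap.c hunks below therefore rewrite those accesses as &runtime_mheap.central[i].mcentral. A stand-alone sketch of the padding idea, with stand-in types (the real code spaces entries CacheLineSize bytes apart so each MCentral lock gets its own cache line):

	#include <stdio.h>

	typedef unsigned char byte;
	typedef struct MCentral { int lock; int sizeclass; } MCentral;

	static struct {
		MCentral mcentral;	/* was the unnamed field "MCentral;" */
		byte pad[64];
	} central[4];

	int main(void) {
		/* each entry spans at least one 64-byte cache line, so the
		   embedded locks never share a line */
		printf("entry size: %zu\n", sizeof central[0]);
		printf("first two locks: %p %p\n",
		       (void*)&central[0].mcentral, (void*)&central[1].mcentral);
		return 0;
	}
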
diff -r bb70e852004f libgo/runtime/mcache.c
--- a/libgo/runtime/mcache.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mcache.c	Fri Apr 03 17:31:02 2015 -0700
@@ -23,9 +23,9 @@
 	MCache *c;
 	int32 i;
 
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 	runtime_memclr((byte*)c, sizeof(*c));
 	for(i = 0; i < NumSizeClasses; i++)
 		c->alloc[i] = &emptymspan;
@@ -44,10 +44,10 @@
 runtime_freemcache(MCache *c)
 {
 	runtime_MCache_ReleaseAll(c);
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	runtime_purgecachedstats(c);
 	runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 }
 
 // Gets a span that has a free object in it and assigns it
@@ -64,19 +64,19 @@
 	if(s->freelist != nil)
 		runtime_throw("refill on a nonempty span");
 	if(s != &emptymspan)
-		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);
+		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass].mcentral, s);
 
 	// Push any explicitly freed objects to the central lists.
 	// Not required, but it seems like a good time to do it.
 	l = &c->free[sizeclass];
 	if(l->nlist > 0) {
-		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
 		l->list = nil;
 		l->nlist = 0;
 	}
 
 	// Get a new cached span from the central lists.
-	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
+	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass].mcentral);
 	if(s == nil)
 		runtime_throw("out of memory");
 	if(s->freelist == nil) {
@@ -102,7 +102,7 @@
 	// We transfer a span at a time from MCentral to MCache,
 	// so we'll do the same in the other direction.
 	if(l->nlist >= (runtime_class_to_allocnpages[sizeclass]<<PageShift)/size) {
-		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass].mcentral, l->list);
 		l->list = nil;
 		l->nlist = 0;
 	}
@@ -118,12 +118,12 @@
 	for(i=0; i<NumSizeClasses; i++) {
 		s = c->alloc[i];
 		if(s != &emptymspan) {
-			runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
+			runtime_MCentral_UncacheSpan(&runtime_mheap.central[i].mcentral, s);
 			c->alloc[i] = &emptymspan;
 		}
 		l = &c->free[i];
 		if(l->nlist > 0) {
-			runtime_MCentral_FreeList(&runtime_mheap.central[i], l->list);
+			runtime_MCentral_FreeList(&runtime_mheap.central[i].mcentral, l->list);
 			l->list = nil;
 			l->nlist = 0;
 		}
diff -r bb70e852004f libgo/runtime/mcentral.c
--- a/libgo/runtime/mcentral.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mcentral.c	Fri Apr 03 17:31:02 2015 -0700
@@ -39,14 +39,14 @@
 	int32 cap, n;
 	uint32 sg;
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	sg = runtime_mheap.sweepgen;
 retry:
 	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
 		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			runtime_MSpan_Sweep(s);
-			runtime_lock(c);
+			runtime_lock(&c->lock);
 			// the span could have been moved to heap, retry
 			goto retry;
 		}
@@ -65,9 +65,9 @@
 			runtime_MSpanList_Remove(s);
 			// swept spans are at the end of the list
 			runtime_MSpanList_InsertBack(&c->empty, s);
-			runtime_unlock(c);
+			runtime_unlock(&c->lock);
 			runtime_MSpan_Sweep(s);
-			runtime_lock(c);
+			runtime_lock(&c->lock);
 			// the span could be moved to nonempty or heap, retry
 			goto retry;
 		}
@@ -82,7 +82,7 @@
 
 	// Replenish central list if empty.
 	if(!MCentral_Grow(c)) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return nil;
 	}
 	goto retry;
@@ -98,7 +98,7 @@
 	runtime_MSpanList_Remove(s);
 	runtime_MSpanList_InsertBack(&c->empty, s);
 	s->incache = true;
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	return s;
 }
 
@@ -109,7 +109,7 @@
 	MLink *v;
 	int32 cap, n;
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 
 	s->incache = false;
 
@@ -135,7 +135,7 @@
 		runtime_MSpanList_Remove(s);
 		runtime_MSpanList_Insert(&c->nonempty, s);
 	}
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 }
 
 // Free the list of objects back into the central free list c.
@@ -145,12 +145,12 @@
 {
 	MLink *next;
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	for(; start != nil; start = next) {
 		next = start->next;
 		MCentral_Free(c, start);
 	}
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 }
 
 // Helper: free one object back into the central free list.
@@ -193,7 +193,7 @@
 	// If s is completely freed, return it to the heap.
 	if(s->ref == 0) {
 		MCentral_ReturnToHeap(c, s); // unlocks c
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 	}
 }
 
@@ -206,7 +206,7 @@
 {
 	if(s->incache)
 		runtime_throw("freespan into cached span");
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 
 	// Move to nonempty if necessary.
 	if(s->freelist == nil) {
@@ -227,7 +227,7 @@
 	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);
 
 	if(s->ref != 0) {
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 		return false;
 	}
 
@@ -260,12 +260,12 @@
 	byte *p;
 	MSpan *s;
 
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
 	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
 	if(s == nil) {
 		// TODO(rsc): Log out of memory
-		runtime_lock(c);
+		runtime_lock(&c->lock);
 		return false;
 	}
 
@@ -282,7 +282,7 @@
 	*tailp = nil;
 	runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
 
-	runtime_lock(c);
+	runtime_lock(&c->lock);
 	c->nfree += n;
 	runtime_MSpanList_Insert(&c->nonempty, s);
 	return true;
@@ -301,7 +301,7 @@
 	if(s->ref != 0)
 		runtime_throw("ref wrong");
 	c->nfree -= (s->npages << PageShift) / size;
-	runtime_unlock(c);
+	runtime_unlock(&c->lock);
 	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
 	runtime_MHeap_Free(&runtime_mheap, s, 0);
 }
diff -r bb70e852004f libgo/runtime/mgc0.c
--- a/libgo/runtime/mgc0.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mgc0.c	Fri Apr 03 17:31:02 2015 -0700
@@ -225,7 +225,7 @@
 	Note	alldone;
 	ParFor	*markfor;
 
-	Lock;
+	Lock	lock;
 	byte	*chunk;
 	uintptr	nchunk;
 } work __attribute__((aligned(8)));
@@ -1337,7 +1337,7 @@
 				// retain everything it points to.
 				spf = (SpecialFinalizer*)sp;
 				// A finalizer can be set for an inner byte of an object, find object beginning.
-				p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
+				p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
 				enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
 				enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
 				enqueue1(&wbuf, (Obj){(void*)&spf->ft, PtrSize, 0});
@@ -1378,7 +1378,7 @@
 	b = (Workbuf*)runtime_lfstackpop(&work.empty);
 	if(b == nil) {
 		// Need to allocate.
-		runtime_lock(&work);
+		runtime_lock(&work.lock);
 		if(work.nchunk < sizeof *b) {
 			work.nchunk = 1<<20;
 			work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
@@ -1388,7 +1388,7 @@
 		b = (Workbuf*)work.chunk;
 		work.chunk += sizeof *b;
 		work.nchunk -= sizeof *b;
-		runtime_unlock(&work);
+		runtime_unlock(&work.lock);
 	}
 	b->nobj = 0;
 	return b;
@@ -1802,7 +1802,7 @@
 		c->local_nsmallfree[cl] += nfree;
 		c->local_cachealloc -= nfree * size;
 		runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
-		res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
+		res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl].mcentral, s, nfree, head.next, end);
 		//MCentral_FreeSpan updates sweepgen
 	}
 	return res;
@@ -2147,10 +2147,10 @@
 		return;
 
 	if(gcpercent == GcpercentUnknown) {	// first time through
-		runtime_lock(&runtime_mheap);
+		runtime_lock(&runtime_mheap.lock);
 		if(gcpercent == GcpercentUnknown)
 			gcpercent = readgogc();
-		runtime_unlock(&runtime_mheap);
+		runtime_unlock(&runtime_mheap.lock);
 	}
 	if(gcpercent < 0)
 		return;
@@ -2423,7 +2423,7 @@
 
 	// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
 	p = (uint64*)pauses->array;
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	n = mstats.numgc;
 	if(n > nelem(mstats.pause_ns))
 		n = nelem(mstats.pause_ns);
@@ -2438,7 +2438,7 @@
 	p[n] = mstats.last_gc;
 	p[n+1] = mstats.numgc;
 	p[n+2] = mstats.pause_total_ns;	
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 	pauses->__count = n+3;
 }
 
@@ -2446,14 +2446,14 @@
 runtime_setgcpercent(int32 in) {
 	int32 out;
 
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	if(gcpercent == GcpercentUnknown)
 		gcpercent = readgogc();
 	out = gcpercent;
 	if(in < 0)
 		in = -1;
 	gcpercent = in;
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 	return out;
 }
 
diff -r bb70e852004f libgo/runtime/mheap.c
--- a/libgo/runtime/mheap.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/mheap.c	Fri Apr 03 17:31:02 2015 -0700
@@ -70,7 +70,7 @@
 	runtime_MSpanList_Init(&h->freelarge);
 	runtime_MSpanList_Init(&h->busylarge);
 	for(i=0; i<nelem(h->central); i++)
-		runtime_MCentral_Init(&h->central[i], i);
+		runtime_MCentral_Init(&h->central[i].mcentral, i);
 }
 
 void
@@ -109,9 +109,9 @@
 			runtime_MSpanList_Remove(s);
 			// swept spans are at the end of the list
 			runtime_MSpanList_InsertBack(list, s);
-			runtime_unlock(h);
+			runtime_unlock(&h->lock);
 			n += runtime_MSpan_Sweep(s);
-			runtime_lock(h);
+			runtime_lock(&h->lock);
 			if(n >= npages)
 				return n;
 			// the span could have been moved elsewhere
@@ -156,7 +156,7 @@
 	}
 
 	// Now sweep everything that is not yet swept.
-	runtime_unlock(h);
+	runtime_unlock(&h->lock);
 	for(;;) {
 		n = runtime_sweepone();
 		if(n == (uintptr)-1)  // all spans are swept
@@ -165,7 +165,7 @@
 		if(reclaimed >= npage)
 			break;
 	}
-	runtime_lock(h);
+	runtime_lock(&h->lock);
 }
 
 // Allocate a new span of npage pages from the heap
@@ -175,7 +175,7 @@
 {
 	MSpan *s;
 
-	runtime_lock(h);
+	runtime_lock(&h->lock);
 	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
 	runtime_m()->mcache->local_cachealloc = 0;
 	s = MHeap_AllocLocked(h, npage, sizeclass);
@@ -191,7 +191,7 @@
 				runtime_MSpanList_InsertBack(&h->busylarge, s);
 		}
 	}
-	runtime_unlock(h);
+	runtime_unlock(&h->lock);
 	if(s != nil) {
 		if(needzero && s->needzero)
 			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
@@ -386,7 +386,7 @@
 void
 runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 {
-	runtime_lock(h);
+	runtime_lock(&h->lock);
 	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
 	runtime_m()->mcache->local_cachealloc = 0;
 	mstats.heap_inuse -= s->npages<<PageShift;
@@ -395,7 +395,7 @@
 		mstats.heap_objects--;
 	}
 	MHeap_FreeLocked(h, s);
-	runtime_unlock(h);
+	runtime_unlock(&h->lock);
 }
 
 static void
@@ -548,10 +548,10 @@
 		runtime_noteclear(&note);
 		runtime_notetsleepg(&note, tick);
 
-		runtime_lock(h);
+		runtime_lock(&h->lock);
 		unixnow = runtime_unixnanotime();
 		if(unixnow - mstats.last_gc > forcegc) {
-			runtime_unlock(h);
+			runtime_unlock(&h->lock);
 			// The scavenger can not block other goroutines,
 			// otherwise deadlock detector can fire spuriously.
 			// GC blocks other goroutines via the runtime_worldsema.
@@ -561,11 +561,11 @@
 			runtime_notetsleepg(&note, -1);
 			if(runtime_debug.gctrace > 0)
 				runtime_printf("scvg%d: GC forced\n", k);
-			runtime_lock(h);
+			runtime_lock(&h->lock);
 		}
 		now = runtime_nanotime();
 		scavenge(k, now, limit);
-		runtime_unlock(h);
+		runtime_unlock(&h->lock);
 	}
 }
 
@@ -575,9 +575,9 @@
 runtime_debug_freeOSMemory(void)
 {
 	runtime_gc(2);  // force GC and do eager sweep
-	runtime_lock(&runtime_mheap);
+	runtime_lock(&runtime_mheap.lock);
 	scavenge(-1, ~(uintptr)0, 0);
-	runtime_unlock(&runtime_mheap);
+	runtime_unlock(&runtime_mheap.lock);
 }
 
 // Initialize a new span with the given start and npages.
@@ -752,11 +752,11 @@
 	runtime_lock(&runtime_mheap.speciallock);
 	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
 	runtime_unlock(&runtime_mheap.speciallock);
-	s->kind = KindSpecialFinalizer;
+	s->special.kind = KindSpecialFinalizer;
 	s->fn = f;
 	s->ft = ft;
 	s->ot = ot;
-	if(addspecial(p, s))
+	if(addspecial(p, &s->special))
 		return true;
 
 	// There was an old finalizer
@@ -789,9 +789,9 @@
 	runtime_lock(&runtime_mheap.speciallock);
 	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
 	runtime_unlock(&runtime_mheap.speciallock);
-	s->kind = KindSpecialProfile;
+	s->special.kind = KindSpecialProfile;
 	s->b = b;
-	if(!addspecial(p, s))
+	if(!addspecial(p, &s->special))
 		runtime_throw("setprofilebucket: profile already set");
 }
 
@@ -879,14 +879,14 @@
 	// remove the span from whatever list it is in now
 	if(s->sizeclass > 0) {
 		// must be in h->central[x].empty
-		c = &h->central[s->sizeclass];
-		runtime_lock(c);
+		c = &h->central[s->sizeclass].mcentral;
+		runtime_lock(&c->lock);
 		runtime_MSpanList_Remove(s);
-		runtime_unlock(c);
-		runtime_lock(h);
+		runtime_unlock(&c->lock);
+		runtime_lock(&h->lock);
 	} else {
 		// must be in h->busy/busylarge
-		runtime_lock(h);
+		runtime_lock(&h->lock);
 		runtime_MSpanList_Remove(s);
 	}
 	// heap is locked now
@@ -933,18 +933,18 @@
 
 	// place the span into a new list
 	if(s->sizeclass > 0) {
-		runtime_unlock(h);
-		c = &h->central[s->sizeclass];
-		runtime_lock(c);
+		runtime_unlock(&h->lock);
+		c = &h->central[s->sizeclass].mcentral;
+		runtime_lock(&c->lock);
 		// swept spans are at the end of the list
 		runtime_MSpanList_InsertBack(&c->empty, s);
-		runtime_unlock(c);
+		runtime_unlock(&c->lock);
 	} else {
 		// Swept spans are at the end of lists.
 		if(s->npages < nelem(h->free))
 			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
 		else
 			runtime_MSpanList_InsertBack(&h->busylarge, s);
-		runtime_unlock(h);
+		runtime_unlock(&h->lock);
 	}
 }
diff -r bb70e852004f libgo/runtime/netpoll.goc
--- a/libgo/runtime/netpoll.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/netpoll.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -53,7 +53,7 @@
 	// pollReset, pollWait, pollWaitCanceled and runtime_netpollready (IO rediness notification)
 	// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
 	// in a lock-free way by all operations.
-	Lock;		// protectes the following fields
+	Lock	lock;	// protectes the following fields
 	uintptr	fd;
 	bool	closing;
 	uintptr	seq;	// protects from stale timers and ready notifications
@@ -68,7 +68,7 @@
 
 static struct
 {
-	Lock;
+	Lock		lock;
 	PollDesc*	first;
 	// PollDesc objects must be type-stable,
 	// because we can get ready notification from epoll/kqueue
@@ -100,7 +100,7 @@
 
 func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
 	pd = allocPollDesc();
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	if(pd->wg != nil && pd->wg != READY)
 		runtime_throw("runtime_pollOpen: blocked write on free descriptor");
 	if(pd->rg != nil && pd->rg != READY)
@@ -112,7 +112,7 @@
 	pd->rd = 0;
 	pd->wg = nil;
 	pd->wd = 0;
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 
 	errno = runtime_netpollopen(fd, pd);
 }
@@ -125,10 +125,10 @@
 	if(pd->rg != nil && pd->rg != READY)
 		runtime_throw("runtime_pollClose: blocked read on closing descriptor");
 	runtime_netpollclose(pd->fd);
-	runtime_lock(&pollcache);
+	runtime_lock(&pollcache.lock);
 	pd->link = pollcache.first;
 	pollcache.first = pd;
-	runtime_unlock(&pollcache);
+	runtime_unlock(&pollcache.lock);
 }
 
 func runtime_pollReset(pd *PollDesc, mode int) (err int) {
@@ -169,9 +169,9 @@
 func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 	G *rg, *wg;
 
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	if(pd->closing) {
-		runtime_unlock(pd);
+		runtime_unlock(&pd->lock);
 		return;
 	}
 	pd->seq++;  // invalidate current timers
@@ -226,7 +226,7 @@
 		rg = netpollunblock(pd, 'r', false);
 	if(pd->wd < 0)
 		wg = netpollunblock(pd, 'w', false);
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 	if(rg)
 		runtime_ready(rg);
 	if(wg)
@@ -236,7 +236,7 @@
 func runtime_pollUnblock(pd *PollDesc) {
 	G *rg, *wg;
 
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	if(pd->closing)
 		runtime_throw("runtime_pollUnblock: already closing");
 	pd->closing = true;
@@ -252,7 +252,7 @@
 		runtime_deltimer(&pd->wt);
 		pd->wt.fv = nil;
 	}
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 	if(rg)
 		runtime_ready(rg);
 	if(wg)
@@ -280,13 +280,13 @@
 void
 runtime_netpolllock(PollDesc *pd)
 {
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 }
 
 void
 runtime_netpollunlock(PollDesc *pd)
 {
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 }
 
 // make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
@@ -399,12 +399,12 @@
 
 	pd = (PollDesc*)arg.data;
 	rg = wg = nil;
-	runtime_lock(pd);
+	runtime_lock(&pd->lock);
 	// Seq arg is seq when the timer was set.
 	// If it's stale, ignore the timer event.
 	if(seq != pd->seq) {
 		// The descriptor was reused or timers were reset.
-		runtime_unlock(pd);
+		runtime_unlock(&pd->lock);
 		return;
 	}
 	if(read) {
@@ -421,7 +421,7 @@
 		runtime_atomicstorep(&pd->wt.fv, nil);  // full memory barrier between store to wd and load of wg in netpollunblock
 		wg = netpollunblock(pd, 'w', false);
 	}
-	runtime_unlock(pd);
+	runtime_unlock(&pd->lock);
 	if(rg)
 		runtime_ready(rg);
 	if(wg)
@@ -452,7 +452,7 @@
 	PollDesc *pd;
 	uint32 i, n;
 
-	runtime_lock(&pollcache);
+	runtime_lock(&pollcache.lock);
 	if(pollcache.first == nil) {
 		n = PollBlockSize/sizeof(*pd);
 		if(n == 0)
@@ -467,6 +467,6 @@
 	}
 	pd = pollcache.first;
 	pollcache.first = pd->link;
-	runtime_unlock(&pollcache);
+	runtime_unlock(&pollcache.lock);
 	return pd;
 }
diff -r bb70e852004f libgo/runtime/proc.c
--- a/libgo/runtime/proc.c	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/proc.c	Fri Apr 03 17:31:02 2015 -0700
@@ -302,7 +302,7 @@
 
 typedef struct Sched Sched;
 struct Sched {
-	Lock;
+	Lock	lock;
 
 	uint64	goidgen;
 	M*	midle;	 // idle m's waiting for work
@@ -709,7 +709,7 @@
 
 	mp->fastrand = 0x49f6428aUL + mp->id + runtime_cputicks();
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	mp->id = runtime_sched.mcount++;
 	checkmcount();
 	runtime_mpreinit(mp);
@@ -720,7 +720,7 @@
 	// runtime_NumCgoCall() iterates over allm w/o schedlock,
 	// so we need to publish it safely.
 	runtime_atomicstorep(&runtime_allm, mp);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Mark gp ready to run.
@@ -747,7 +747,7 @@
 
 	// Figure out how many CPUs to use during GC.
 	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	n = runtime_gomaxprocs;
 	if(n > runtime_ncpu)
 		n = runtime_ncpu > 0 ? runtime_ncpu : 1;
@@ -755,7 +755,7 @@
 		n = MaxGcproc;
 	if(n > runtime_sched.nmidle+1) // one M is currently running
 		n = runtime_sched.nmidle+1;
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return n;
 }
 
@@ -764,14 +764,14 @@
 {
 	int32 n;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	n = runtime_gomaxprocs;
 	if(n > runtime_ncpu)
 		n = runtime_ncpu;
 	if(n > MaxGcproc)
 		n = MaxGcproc;
 	n -= runtime_sched.nmidle+1; // one M is currently running
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return n > 0;
 }
 
@@ -781,7 +781,7 @@
 	M *mp;
 	int32 n, pos;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	pos = 0;
 	for(n = 1; n < nproc; n++) {  // one M is currently running
 		if(runtime_allp[pos]->mcache == m->mcache)
@@ -794,7 +794,7 @@
 		pos++;
 		runtime_notewakeup(&mp->park);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Similar to stoptheworld but best-effort and can be called several times.
@@ -833,7 +833,7 @@
 	P *p;
 	bool wait;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	runtime_sched.stopwait = runtime_gomaxprocs;
 	runtime_atomicstore((uint32*)&runtime_sched.gcwaiting, 1);
 	preemptall();
@@ -853,7 +853,7 @@
 		runtime_sched.stopwait--;
 	}
 	wait = runtime_sched.stopwait > 0;
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	// wait for remaining P's to stop voluntarily
 	if(wait) {
@@ -887,7 +887,7 @@
 	gp = runtime_netpoll(false);  // non-blocking
 	injectglist(gp);
 	add = needaddgcproc();
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(newprocs) {
 		procresize(newprocs);
 		newprocs = 0;
@@ -911,7 +911,7 @@
 		runtime_sched.sysmonwait = false;
 		runtime_notewakeup(&runtime_sched.sysmonnote);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	while(p1) {
 		p = p1;
@@ -1346,9 +1346,9 @@
 	}
 
 retry:
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	mput(m);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	runtime_notesleep(&m->park);
 	runtime_noteclear(&m->park);
 	if(m->helpgc) {
@@ -1375,18 +1375,18 @@
 	M *mp;
 	void (*fn)(void);
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(p == nil) {
 		p = pidleget();
 		if(p == nil) {
-			runtime_unlock(&runtime_sched);
+			runtime_unlock(&runtime_sched.lock);
 			if(spinning)
 				runtime_xadd(&runtime_sched.nmspinning, -1);
 			return;
 		}
 	}
 	mp = mget();
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(mp == nil) {
 		fn = nil;
 		if(spinning)
@@ -1419,28 +1419,28 @@
 		startm(p, true);
 		return;
 	}
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(runtime_sched.gcwaiting) {
 		p->status = Pgcstop;
 		if(--runtime_sched.stopwait == 0)
 			runtime_notewakeup(&runtime_sched.stopnote);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return;
 	}
 	if(runtime_sched.runqsize) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		startm(p, false);
 		return;
 	}
 	// If this is the last running P and nobody is polling network,
 	// need to wakeup another M to poll network.
 	if(runtime_sched.npidle == (uint32)runtime_gomaxprocs-1 && runtime_atomicload64(&runtime_sched.lastpoll) != 0) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		startm(p, false);
 		return;
 	}
 	pidleput(p);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Tries to add one more P to execute G's.
@@ -1512,11 +1512,11 @@
 		runtime_xadd(&runtime_sched.nmspinning, -1);
 	}
 	p = releasep();
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	p->status = Pgcstop;
 	if(--runtime_sched.stopwait == 0)
 		runtime_notewakeup(&runtime_sched.stopnote);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	stopm();
 }
 
@@ -1567,9 +1567,9 @@
 		return gp;
 	// global runq
 	if(runtime_sched.runqsize) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		gp = globrunqget(m->p, 0);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		if(gp)
 			return gp;
 	}
@@ -1603,19 +1603,19 @@
 	}
 stop:
 	// return P and block
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	if(runtime_sched.gcwaiting) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		goto top;
 	}
 	if(runtime_sched.runqsize) {
 		gp = globrunqget(m->p, 0);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return gp;
 	}
 	p = releasep();
 	pidleput(p);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(m->spinning) {
 		m->spinning = false;
 		runtime_xadd(&runtime_sched.nmspinning, -1);
@@ -1624,9 +1624,9 @@
 	for(i = 0; i < runtime_gomaxprocs; i++) {
 		p = runtime_allp[i];
 		if(p && p->runqhead != p->runqtail) {
-			runtime_lock(&runtime_sched);
+			runtime_lock(&runtime_sched.lock);
 			p = pidleget();
-			runtime_unlock(&runtime_sched);
+			runtime_unlock(&runtime_sched.lock);
 			if(p) {
 				acquirep(p);
 				goto top;
@@ -1643,9 +1643,9 @@
 		gp = runtime_netpoll(true);  // block until new work is available
 		runtime_atomicstore64(&runtime_sched.lastpoll, runtime_nanotime());
 		if(gp) {
-			runtime_lock(&runtime_sched);
+			runtime_lock(&runtime_sched.lock);
 			p = pidleget();
-			runtime_unlock(&runtime_sched);
+			runtime_unlock(&runtime_sched.lock);
 			if(p) {
 				acquirep(p);
 				injectglist(gp->schedlink);
@@ -1688,14 +1688,14 @@
 
 	if(glist == nil)
 		return;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	for(n = 0; glist; n++) {
 		gp = glist;
 		glist = gp->schedlink;
 		gp->status = Grunnable;
 		globrunqput(gp);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	for(; n && runtime_sched.npidle; n--)
 		startm(nil, false);
@@ -1726,9 +1726,9 @@
 	// This is a fancy way to say tick%61==0,
 	// it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
 	if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime_sched.runqsize > 0) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		gp = globrunqget(m->p, 1);
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		if(gp)
 			resetspinning();
 	}
@@ -1822,9 +1822,9 @@
 	gp->status = Grunnable;
 	gp->m = nil;
 	m->curg = nil;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	globrunqput(gp);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(m->lockedg) {
 		stoplockedm();
 		execute(gp);  // Never returns.
@@ -1925,24 +1925,24 @@
 	g->status = Gsyscall;
 
 	if(runtime_atomicload(&runtime_sched.sysmonwait)) {  // TODO: fast atomic
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		if(runtime_atomicload(&runtime_sched.sysmonwait)) {
 			runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 			runtime_notewakeup(&runtime_sched.sysmonnote);
 		}
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 	}
 
 	m->mcache = nil;
 	m->p->m = nil;
 	runtime_atomicstore(&m->p->status, Psyscall);
 	if(runtime_sched.gcwaiting) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		if (runtime_sched.stopwait > 0 && runtime_cas(&m->p->status, Psyscall, Pgcstop)) {
 			if(--runtime_sched.stopwait == 0)
 				runtime_notewakeup(&runtime_sched.stopnote);
 		}
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 	}
 
 	m->locks--;
@@ -2053,13 +2053,13 @@
 	// Try to get any other idle P.
 	m->p = nil;
 	if(runtime_sched.pidle) {
-		runtime_lock(&runtime_sched);
+		runtime_lock(&runtime_sched.lock);
 		p = pidleget();
 		if(p && runtime_atomicload(&runtime_sched.sysmonwait)) {
 			runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 			runtime_notewakeup(&runtime_sched.sysmonnote);
 		}
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		if(p) {
 			acquirep(p);
 			return true;
@@ -2078,7 +2078,7 @@
 	gp->status = Grunnable;
 	gp->m = nil;
 	m->curg = nil;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	p = pidleget();
 	if(p == nil)
 		globrunqput(gp);
@@ -2086,7 +2086,7 @@
 		runtime_atomicstore(&runtime_sched.sysmonwait, 0);
 		runtime_notewakeup(&runtime_sched.sysmonnote);
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	if(p) {
 		acquirep(p);
 		execute(gp);  // Never returns.
@@ -2365,13 +2365,13 @@
 
 	if(n > MaxGomaxprocs)
 		n = MaxGomaxprocs;
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	ret = runtime_gomaxprocs;
 	if(n <= 0 || n == ret) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return ret;
 	}
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	runtime_semacquire(&runtime_worldsema, false);
 	m->gcing = 1;
@@ -2476,7 +2476,7 @@
 }
 
 static struct {
-	Lock;
+	Lock lock;
 	void (*fn)(uintptr*, int32);
 	int32 hz;
 	uintptr pcbuf[TracebackMaxFrames];
@@ -2508,9 +2508,9 @@
 	if(mp->mcache == nil)
 		traceback = false;
 
-	runtime_lock(&prof);
+	runtime_lock(&prof.lock);
 	if(prof.fn == nil) {
-		runtime_unlock(&prof);
+		runtime_unlock(&prof.lock);
 		mp->mallocing--;
 		return;
 	}
@@ -2538,7 +2538,7 @@
 			prof.pcbuf[1] = (uintptr)System;
 	}
 	prof.fn(prof.pcbuf, n);
-	runtime_unlock(&prof);
+	runtime_unlock(&prof.lock);
 	mp->mallocing--;
 }
 
@@ -2563,13 +2563,13 @@
 	// it would deadlock.
 	runtime_resetcpuprofiler(0);
 
-	runtime_lock(&prof);
+	runtime_lock(&prof.lock);
 	prof.fn = fn;
 	prof.hz = hz;
-	runtime_unlock(&prof);
-	runtime_lock(&runtime_sched);
+	runtime_unlock(&prof.lock);
+	runtime_lock(&runtime_sched.lock);
 	runtime_sched.profilehz = hz;
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 
 	if(hz != 0)
 		runtime_resetcpuprofiler(hz);
@@ -2707,11 +2707,11 @@
 static void
 incidlelocked(int32 v)
 {
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	runtime_sched.nmidlelocked += v;
 	if(v > 0)
 		checkdead();
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Check for deadlock situation.
@@ -2780,16 +2780,16 @@
 		runtime_usleep(delay);
 		if(runtime_debug.schedtrace <= 0 &&
 			(runtime_sched.gcwaiting || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs)) {  // TODO: fast atomic
-			runtime_lock(&runtime_sched);
+			runtime_lock(&runtime_sched.lock);
 			if(runtime_atomicload(&runtime_sched.gcwaiting) || runtime_atomicload(&runtime_sched.npidle) == (uint32)runtime_gomaxprocs) {
 				runtime_atomicstore(&runtime_sched.sysmonwait, 1);
-				runtime_unlock(&runtime_sched);
+				runtime_unlock(&runtime_sched.lock);
 				runtime_notesleep(&runtime_sched.sysmonnote);
 				runtime_noteclear(&runtime_sched.sysmonnote);
 				idle = 0;
 				delay = 20;
 			} else
-				runtime_unlock(&runtime_sched);
+				runtime_unlock(&runtime_sched.lock);
 		}
 		// poll network if not polled for more than 10ms
 		lastpoll = runtime_atomicload64(&runtime_sched.lastpoll);
@@ -2918,7 +2918,7 @@
 	if(starttime == 0)
 		starttime = now;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	runtime_printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idlethreads=%d runqueue=%d",
 		(now-starttime)/1000000, runtime_gomaxprocs, runtime_sched.npidle, runtime_sched.mcount,
 		runtime_sched.nmidle, runtime_sched.runqsize);
@@ -2954,7 +2954,7 @@
 		}
 	}
 	if(!detailed) {
-		runtime_unlock(&runtime_sched);
+		runtime_unlock(&runtime_sched.lock);
 		return;
 	}
 	for(mp = runtime_allm; mp; mp = mp->alllink) {
@@ -2986,7 +2986,7 @@
 			lockedm ? lockedm->id : -1);
 	}
 	runtime_unlock(&allglock);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 }
 
 // Put mp on midle list.
@@ -3142,9 +3142,9 @@
 	for(i=0; i<n; i++)
 		batch[i]->schedlink = batch[i+1];
 	// Now put the batch on global queue.
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	globrunqputbatch(batch[0], batch[n], n+1);
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return true;
 }
 
@@ -3296,11 +3296,11 @@
 {
 	int32 out;
 
-	runtime_lock(&runtime_sched);
+	runtime_lock(&runtime_sched.lock);
 	out = runtime_sched.maxmcount;
 	runtime_sched.maxmcount = in;
 	checkmcount();
-	runtime_unlock(&runtime_sched);
+	runtime_unlock(&runtime_sched.lock);
 	return out;
 }
 
diff -r bb70e852004f libgo/runtime/runtime.h
--- a/libgo/runtime/runtime.h	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/runtime.h	Fri Apr 03 17:31:02 2015 -0700
@@ -285,7 +285,7 @@
 
 struct P
 {
-	Lock;
+	Lock	lock;
 
 	int32	id;
 	uint32	status;		// one of Pidle/Prunning/...
@@ -383,7 +383,7 @@
 
 struct	Timers
 {
-	Lock;
+	Lock	lock;
 	G	*timerproc;
 	bool		sleeping;
 	bool		rescheduling;
diff -r bb70e852004f libgo/runtime/sema.goc
--- a/libgo/runtime/sema.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/sema.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -35,7 +35,7 @@
 typedef struct SemaRoot SemaRoot;
 struct SemaRoot
 {
-	Lock;
+	Lock		lock;
 	SemaWaiter*	head;
 	SemaWaiter*	tail;
 	// Number of waiters. Read w/o the lock.
@@ -47,7 +47,7 @@
 
 struct semtable
 {
-	SemaRoot;
+	SemaRoot root;
 	uint8 pad[CacheLineSize-sizeof(SemaRoot)];
 };
 static struct semtable semtable[SEMTABLESZ];
@@ -55,7 +55,7 @@
 static SemaRoot*
 semroot(uint32 volatile *addr)
 {
-	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
+	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ].root;
}
 
 static void
@@ -124,19 +124,19 @@
 	}
 	for(;;) {
 
-		runtime_lock(root);
+		runtime_lock(&root->lock);
 		// Add ourselves to nwait to disable "easy case" in semrelease.
 		runtime_xadd(&root->nwait, 1);
 		// Check cansemacquire to avoid missed wakeup.
 		if(cansemacquire(addr)) {
 			runtime_xadd(&root->nwait, -1);
-			runtime_unlock(root);
+			runtime_unlock(&root->lock);
 			return;
 		}
 		// Any semrelease after the cansemacquire knows we're waiting
 		// (we set nwait above), so go to sleep.
 		semqueue(root, addr, &s);
-		runtime_parkunlock(root, "semacquire");
+		runtime_parkunlock(&root->lock, "semacquire");
 		if(cansemacquire(addr)) {
 			if(t0)
 				runtime_blockevent(s.releasetime - t0, 3);
@@ -161,11 +161,11 @@
 		return;
 
 	// Harder case: search for a waiter and wake it.
-	runtime_lock(root);
+	runtime_lock(&root->lock);
 	if(runtime_atomicload(&root->nwait) == 0) {
 		// The count is already consumed by another goroutine,
 		// so no need to wake up another goroutine.
-		runtime_unlock(root);
+		runtime_unlock(&root->lock);
 		return;
 	}
 	for(s = root->head; s; s = s->next) {
@@ -175,7 +175,7 @@
 			break;
 		}
 	}
-	runtime_unlock(root);
+	runtime_unlock(&root->lock);
 	if(s) {
 		if(s->releasetime)
 			s->releasetime = runtime_cputicks();
@@ -211,7 +211,7 @@
 typedef struct SyncSema SyncSema;
 struct SyncSema
 {
-	Lock;
+	Lock		lock;
 	SemaWaiter*	head;
 	SemaWaiter*	tail;
 };
@@ -238,7 +238,7 @@
 		w.releasetime = -1;
 	}
 
-	runtime_lock(s);
+	runtime_lock(&s->lock);
 	if(s->head && s->head->nrelease > 0) {
 		// have pending release, consume it
 		wake = nil;
@@ -249,7 +249,7 @@
 			if(s->head == nil)
 				s->tail = nil;
 		}
-		runtime_unlock(s);
+		runtime_unlock(&s->lock);
 		if(wake)
 			runtime_ready(wake->g);
 	} else {
@@ -259,7 +259,7 @@
 		else
 			s->tail->next = &w;
 		s->tail = &w;
-		runtime_parkunlock(s, "semacquire");
+		runtime_parkunlock(&s->lock, "semacquire");
 		if(t0)
 			runtime_blockevent(w.releasetime - t0, 2);
 	}
@@ -274,7 +274,7 @@
 	w.next = nil;
 	w.releasetime = 0;
 
-	runtime_lock(s);
+	runtime_lock(&s->lock);
 	while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
 		// have pending acquire, satisfy it
 		wake = s->head;
@@ -293,7 +293,7 @@
 		else
 			s->tail->next = &w;
 		s->tail = &w;
-		runtime_parkunlock(s, "semarelease");
+		runtime_parkunlock(&s->lock, "semarelease");
 	} else
-		runtime_unlock(s);
+		runtime_unlock(&s->lock);
 }
diff -r bb70e852004f libgo/runtime/sigqueue.goc
--- a/libgo/runtime/sigqueue.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/sigqueue.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -32,7 +32,7 @@
 #include "defs.h"
 
 static struct {
-	Note;
+	Note note;
 	uint32 mask[(NSIG+31)/32];
 	uint32 wanted[(NSIG+31)/32];
 	uint32 state;
@@ -70,7 +70,7 @@
 					new = HASSIGNAL;
 				if(runtime_cas(&sig.state, old, new)) {
 					if (old == HASWAITER)
-						runtime_notewakeup(&sig);
+						runtime_notewakeup(&sig.note);
 					break;
 				}
 			}
@@ -107,8 +107,8 @@
 				new = HASWAITER;
 			if(runtime_cas(&sig.state, old, new)) {
 				if (new == HASWAITER) {
-					runtime_notetsleepg(&sig, -1);
-					runtime_noteclear(&sig);
+					runtime_notetsleepg(&sig.note, -1);
+					runtime_noteclear(&sig.note);
 				}
 				break;
 			}
@@ -138,7 +138,7 @@
 		// to use for initialization.  It does not pass
 		// signal information in m.
 		sig.inuse = true;	// enable reception of signals; cannot disable
-		runtime_noteclear(&sig);
+		runtime_noteclear(&sig.note);
 		return;
 	}
 	
diff -r bb70e852004f libgo/runtime/time.goc
--- a/libgo/runtime/time.goc	Fri Jan 16 13:28:21 2015 -0800
+++ b/libgo/runtime/time.goc	Fri Apr 03 17:31:02 2015 -0700
@@ -92,17 +92,17 @@
 	t.fv = &readyv;
 	t.arg.__object = g;
 	t.seq = 0;
-	runtime_lock(&timers);
+	runtime_lock(&timers.lock);
 	addtimer(&t);
-	runtime_parkunlock(&timers, reason);
+	runtime_parkunlock(&timers.lock, reason);
 }
 
 void
 runtime_addtimer(Timer *t)
 {
-	runtime_lock(&timers);
+	runtime_lock(&timers.lock);
 	addtimer(t);
-	runtime_unlock(&timers);
+	runtime_unlock(&timers.lock);
 }
 
 // Add a timer to the heap and start or kick the timer proc
@@ -167,14 +167,14 @@
 	i = t->i;
 	gi = i;
 
-	runtime_lock(&timers);
+	runtime_lock(&timers.lock);
 
 	// t may not be registered anymore and may have
 	// a bogus i (typically 0, if generated by Go).
 	// Verify it before proceeding.
 	i = t->i;
 	if(i < 0 || i >= timers.len || timers.t[i] != t) {
-		runtime_unlock(&timers);
+		runtime_unlock(&timers.lock);
 		return false;
 	}
 
@@ -190,7 +190,7 @@
 	}
 	if(debug)
 		dumptimers("deltimer");
-	runtime_unlock(&timers);
+	runtime_unlock(&timers.lock);
 	return true;
 }
 
@@ -209,7 +209,7 @@
 	uintptr seq;
 
 	for(;;) {
-		runtime_lock(&timers);
+		runtime_lock(&timers.lock);
 		timers.sleeping = false;
 		now = runtime_nanotime();
 		for(;;) {
@@ -236,7 +236,7 @@
 			f = (void*)t->fv->fn;
 			arg = t->arg;
 			seq = t->seq;
-			runtime_unlock(&timers);
+			runtime_unlock(&timers.lock);
 			__builtin_call_with_static_chain(f(arg, seq), fv);
 
 			// clear f and arg to avoid leak while sleeping for next timer
@@ -246,20 +246,20 @@
 			arg.__object = nil;
 			USED(&arg);
 
-			runtime_lock(&timers);
+			runtime_lock(&timers.lock);
 		}
 		if(delta < 0) {
 			// No timers left - put goroutine to sleep.
 			timers.rescheduling = true;
 			runtime_g()->isbackground = true;
-			runtime_parkunlock(&timers, "timer goroutine (idle)");
+			runtime_parkunlock(&timers.lock, "timer goroutine (idle)");
 			runtime_g()->isbackground = false;
 			continue;
 		}
 		// At least one timer pending.  Sleep until then.
 		timers.sleeping = true;
 		runtime_noteclear(&timers.waitnote);
-		runtime_unlock(&timers);
+		runtime_unlock(&timers.lock);
 		runtime_notetsleepg(&timers.waitnote, delta);
 	}
 }