github.com/rohankumardubey/syslog-redirector-golang@v0.0.0-20140320174030-4859f03d829a/src/pkg/runtime/race.c

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of the race detector API.
// +build race

#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "race.h"
#include "../../cmd/ld/textflag.h"

// Entry points into the race runtime, provided by the runtime∕race package
// (which wraps the ThreadSanitizer runtime library).
void runtime∕race·Initialize(uintptr *racectx);
void runtime∕race·MapShadow(void *addr, uintptr size);
void runtime∕race·Finalize(void);
void runtime∕race·FinalizerGoroutine(uintptr racectx);
void runtime∕race·Read(uintptr racectx, void *addr, void *pc);
void runtime∕race·Write(uintptr racectx, void *addr, void *pc);
void runtime∕race·ReadRange(uintptr racectx, void *addr, uintptr sz, void *pc);
void runtime∕race·WriteRange(uintptr racectx, void *addr, uintptr sz, void *pc);
void runtime∕race·FuncEnter(uintptr racectx, void *pc);
void runtime∕race·FuncExit(uintptr racectx);
void runtime∕race·Malloc(uintptr racectx, void *p, uintptr sz, void *pc);
void runtime∕race·Free(void *p);
void runtime∕race·GoStart(uintptr racectx, uintptr *chracectx, void *pc);
void runtime∕race·GoEnd(uintptr racectx);
void runtime∕race·Acquire(uintptr racectx, void *addr);
void runtime∕race·Release(uintptr racectx, void *addr);
void runtime∕race·ReleaseMerge(uintptr racectx, void *addr);

extern byte noptrdata[];
extern byte enoptrbss[];

static bool onstack(uintptr argp);

// We set m->racecall around all calls into the race library to trigger the
// fast path in cgocall. We also increment m->locks to disable preemption and
// potential rescheduling, so that m->racecall is reset on the correct m.
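//
// Concretely, every wrapper below brackets its call into the race runtime as:
//
//	m->racecall = true;
//	m->locks++;
//	runtime∕race·Xxx(...);
//	m->locks--;
//	m->racecall = false;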

// raceinit initializes the race runtime, maps shadow memory for the data and
// bss segments, and returns the race context of the initial goroutine.
uintptr
runtime·raceinit(void)
{
	uintptr racectx, start, size;

	m->racecall = true;
	m->locks++;
	runtime∕race·Initialize(&racectx);
	// Round data segment to page boundaries, because it's used in mmap().
	start = (uintptr)noptrdata & ~(PageSize-1);
	size = ROUND((uintptr)enoptrbss - start, PageSize);
	runtime∕race·MapShadow((void*)start, size);
	m->locks--;
	m->racecall = false;
	return racectx;
}

// racefini shuts down the race runtime at program exit.
void
runtime·racefini(void)
{
	m->racecall = true;
	m->locks++;
	runtime∕race·Finalize();
	m->locks--;
	m->racecall = false;
}

// racemapshadow maps race-detector shadow memory for a newly mapped region
// [addr, addr+size), e.g. when the heap grows.
void
runtime·racemapshadow(void *addr, uintptr size)
{
	m->racecall = true;
	m->locks++;
	runtime∕race·MapShadow(addr, size);
	m->locks--;
	m->racecall = false;
}

// Called from instrumented code.
// If we split the stack, getcallerpc() can return runtime·lessstack().
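//
// For example, with -race the compiler rewrites a store such as "x = 1" into
// (roughly) a call to racewrite(&x) followed by the store itself; accesses
// that turn out to target stack memory are filtered out below via onstack().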
#pragma textflag NOSPLIT
void
runtime·racewrite(uintptr addr)
{
	if(!onstack(addr)) {
		m->racecall = true;
		m->locks++;
		runtime∕race·Write(g->racectx, (void*)addr, runtime·getcallerpc(&addr));
		m->locks--;
		m->racecall = false;
	}
}

#pragma textflag NOSPLIT
void
runtime·racewriterange(uintptr addr, uintptr sz)
{
	if(!onstack(addr)) {
		m->racecall = true;
		m->locks++;
		runtime∕race·WriteRange(g->racectx, (void*)addr, sz, runtime·getcallerpc(&addr));
		m->locks--;
		m->racecall = false;
	}
}

// Called from instrumented code.
// If we split the stack, getcallerpc() can return runtime·lessstack().
#pragma textflag NOSPLIT
void
runtime·raceread(uintptr addr)
{
	if(!onstack(addr)) {
		m->racecall = true;
		m->locks++;
		runtime∕race·Read(g->racectx, (void*)addr, runtime·getcallerpc(&addr));
		m->locks--;
		m->racecall = false;
	}
}

#pragma textflag NOSPLIT
void
runtime·racereadrange(uintptr addr, uintptr sz)
{
	if(!onstack(addr)) {
		m->racecall = true;
		m->locks++;
		runtime∕race·ReadRange(g->racectx, (void*)addr, sz, runtime·getcallerpc(&addr));
		m->locks--;
		m->racecall = false;
	}
}

// Called from runtime·racefuncenter (assembly).
// FuncEnter/FuncExit maintain the call stack shown in race reports.
#pragma textflag NOSPLIT
void
runtime·racefuncenter1(uintptr pc)
{
	// If the caller PC is lessstack, use the slower runtime·callers
	// to walk across the stack split and find the real caller.
	if(pc == (uintptr)runtime·lessstack)
		runtime·callers(2, &pc, 1);

	m->racecall = true;
	m->locks++;
	runtime∕race·FuncEnter(g->racectx, (void*)pc);
	m->locks--;
	m->racecall = false;
}

// Called from instrumented code.
#pragma textflag NOSPLIT
void
runtime·racefuncexit(void)
{
	m->racecall = true;
	m->locks++;
	runtime∕race·FuncExit(g->racectx);
	m->locks--;
	m->racecall = false;
}

// racemalloc informs the race runtime that [p, p+sz) has just been allocated.
void
runtime·racemalloc(void *p, uintptr sz)
{
	// use m->curg because runtime·stackalloc() is called from g0
	if(m->curg == nil)
		return;
	m->racecall = true;
	m->locks++;
	runtime∕race·Malloc(m->curg->racectx, p, sz, /* unused pc */ 0);
	m->locks--;
	m->racecall = false;
}

// racefree informs the race runtime that the block at p has been freed.
void
runtime·racefree(void *p)
{
	m->racecall = true;
	m->locks++;
	runtime∕race·Free(p);
	m->locks--;
	m->racecall = false;
}

// racegostart creates and returns the race context for a newly started goroutine.
uintptr
runtime·racegostart(void *pc)
{
	uintptr racectx;

	m->racecall = true;
	m->locks++;
	runtime∕race·GoStart(g->racectx, &racectx, pc);
	m->locks--;
	m->racecall = false;
	return racectx;
}

// racegoend records that the current goroutine is exiting.
void
runtime·racegoend(void)
{
	m->racecall = true;
	m->locks++;
	runtime∕race·GoEnd(g->racectx);
	m->locks--;
	m->racecall = false;
}

// memoryaccess is the common path for reporting a single read or write at addr.
// A non-zero callpc is reported as an extra caller frame around the access,
// via FuncEnter/FuncExit.
static void
memoryaccess(void *addr, uintptr callpc, uintptr pc, bool write)
{
	uintptr racectx;

	if(!onstack((uintptr)addr)) {
		m->racecall = true;
		m->locks++;
		racectx = g->racectx;
		if(callpc) {
			if(callpc == (uintptr)runtime·lessstack)
				runtime·callers(3, &callpc, 1);
			runtime∕race·FuncEnter(racectx, (void*)callpc);
		}
		if(write)
			runtime∕race·Write(racectx, addr, (void*)pc);
		else
			runtime∕race·Read(racectx, addr, (void*)pc);
		if(callpc)
			runtime∕race·FuncExit(racectx);
		m->locks--;
		m->racecall = false;
	}
}

void
runtime·racewritepc(void *addr, void *callpc, void *pc)
{
	memoryaccess(addr, (uintptr)callpc, (uintptr)pc, true);
}

void
runtime·racereadpc(void *addr, void *callpc, void *pc)
{
	memoryaccess(addr, (uintptr)callpc, (uintptr)pc, false);
}

// rangeaccess is the range analogue of memoryaccess: it reports a read or
// write of [addr, addr+size).
static void
rangeaccess(void *addr, uintptr size, uintptr callpc, uintptr pc, bool write)
{
	uintptr racectx;

	if(!onstack((uintptr)addr)) {
		m->racecall = true;
		m->locks++;
		racectx = g->racectx;
		if(callpc) {
			if(callpc == (uintptr)runtime·lessstack)
				runtime·callers(3, &callpc, 1);
			runtime∕race·FuncEnter(racectx, (void*)callpc);
		}
		if(write)
			runtime∕race·WriteRange(racectx, addr, size, (void*)pc);
		else
			runtime∕race·ReadRange(racectx, addr, size, (void*)pc);
		if(callpc)
			runtime∕race·FuncExit(racectx);
		m->locks--;
		m->racecall = false;
	}
}

void
runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc)
{
	rangeaccess(addr, sz, (uintptr)callpc, (uintptr)pc, true);
}

void
runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc)
{
	rangeaccess(addr, sz, (uintptr)callpc, (uintptr)pc, false);
}
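
// The acquire/release annotations below model happens-before edges created by
// the runtime's own synchronization (for example, channel sends release on the
// channel and the matching receives acquire on it), since the race runtime
// cannot observe that synchronization directly. The *g variants record the
// event on behalf of goroutine gp instead of the current goroutine.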

void
runtime·raceacquire(void *addr)
{
	runtime·raceacquireg(g, addr);
}

void
runtime·raceacquireg(G *gp, void *addr)
{
	if(g->raceignore)
		return;
	m->racecall = true;
	m->locks++;
	runtime∕race·Acquire(gp->racectx, addr);
	m->locks--;
	m->racecall = false;
}

void
runtime·racerelease(void *addr)
{
	runtime·racereleaseg(g, addr);
}

void
runtime·racereleaseg(G *gp, void *addr)
{
	if(g->raceignore)
		return;
	m->racecall = true;
	m->locks++;
	runtime∕race·Release(gp->racectx, addr);
	m->locks--;
	m->racecall = false;
}

void
runtime·racereleasemerge(void *addr)
{
	runtime·racereleasemergeg(g, addr);
}

void
runtime·racereleasemergeg(G *gp, void *addr)
{
	if(g->raceignore)
		return;
	m->racecall = true;
	m->locks++;
	runtime∕race·ReleaseMerge(gp->racectx, addr);
	m->locks--;
	m->racecall = false;
}

// racefingo marks the current goroutine as the finalizer goroutine.
void
runtime·racefingo(void)
{
	m->racecall = true;
	m->locks++;
	runtime∕race·FinalizerGoroutine(g->racectx);
	m->locks--;
	m->racecall = false;
}

// Go-callable entry points, exported by package runtime in race-enabled builds.

// func RaceAcquire(addr unsafe.Pointer)
void
runtime·RaceAcquire(void *addr)
{
	runtime·raceacquire(addr);
}

// func RaceRelease(addr unsafe.Pointer)
void
runtime·RaceRelease(void *addr)
{
	runtime·racerelease(addr);
}

// func RaceReleaseMerge(addr unsafe.Pointer)
void
runtime·RaceReleaseMerge(void *addr)
{
	runtime·racereleasemerge(addr);
}

// func RaceSemacquire(s *uint32)
void
runtime·RaceSemacquire(uint32 *s)
{
	runtime·semacquire(s, false);
}

// func RaceSemrelease(s *uint32)
void
runtime·RaceSemrelease(uint32 *s)
{
	runtime·semrelease(s);
}

// func RaceRead(addr unsafe.Pointer)
#pragma textflag NOSPLIT
void
runtime·RaceRead(void *addr)
{
	memoryaccess(addr, 0, (uintptr)runtime·getcallerpc(&addr), false);
}

// func RaceWrite(addr unsafe.Pointer)
#pragma textflag NOSPLIT
void
runtime·RaceWrite(void *addr)
{
	memoryaccess(addr, 0, (uintptr)runtime·getcallerpc(&addr), true);
}

// func RaceReadRange(addr unsafe.Pointer, len int)
#pragma textflag NOSPLIT
void
runtime·RaceReadRange(void *addr, intgo len)
{
	rangeaccess(addr, len, 0, (uintptr)runtime·getcallerpc(&addr), false);
}

// func RaceWriteRange(addr unsafe.Pointer, len int)
#pragma textflag NOSPLIT
void
runtime·RaceWriteRange(void *addr, intgo len)
{
	rangeaccess(addr, len, 0, (uintptr)runtime·getcallerpc(&addr), true);
}

// RaceDisable and RaceEnable nest: while g->raceignore is non-zero, the
// acquire/release annotations made by the current goroutine are dropped.

// func RaceDisable()
void
runtime·RaceDisable(void)
{
	g->raceignore++;
}

// func RaceEnable()
void
runtime·RaceEnable(void)
{
	g->raceignore--;
}
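
// Illustrative, non-normative sketch of how the exported annotations above are
// meant to be used from Go code built with -race: a synchronization primitive
// whose real handoff is invisible to the detector tells it about the
// happens-before edge it creates. The handoff/item types and the C helpers
// cHandoffSignal/cHandoffWait are hypothetical; only the runtime.Race* calls
// correspond to functions in this file.
//
//	func send(h *handoff, v *item) {
//		h.val = v
//		runtime.RaceRelease(unsafe.Pointer(h))
//		cHandoffSignal(h) // real synchronization, not visible to the detector
//	}
//
//	func recv(h *handoff) *item {
//		cHandoffWait(h) // real synchronization, not visible to the detector
//		runtime.RaceAcquire(unsafe.Pointer(h))
//		return h.val
//	}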

// onstack reports whether argp lies outside the data/bss segments and the heap
// arena; such addresses are assumed to be on a goroutine stack and are not
// reported to the race runtime.
static bool
onstack(uintptr argp)
{
	// noptrdata, data, bss, noptrbss
	// the layout is in ../../cmd/ld/data.c
	if((byte*)argp >= noptrdata && (byte*)argp < enoptrbss)
		return false;
	if((byte*)argp >= runtime·mheap.arena_start && (byte*)argp < runtime·mheap.arena_used)
		return false;
	return true;
}