github.com/fenixara/go@v0.0.0-20170127160404-96ea0918e670/src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

// Public race detection API, present iff built with -race.

package runtime

import (
	"unsafe"
)

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceSemacquire(s *uint32)
func RaceSemrelease(s *uint32)

func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
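
// The exported functions above are the public annotation surface of the race
// detector: the compiler emits calls to the unexported instrumentation hooks,
// while library and test code (usually via the internal/race wrappers) can
// call these directly to describe memory accesses the detector cannot see.
// A minimal sketch, assuming a -race build and a hypothetical fillBuffer
// routine implemented in assembly or C that the compiler does not instrument:
//
//	buf := make([]byte, 64)
//	// Tell the detector that this goroutine is about to write all of buf.
//	runtime.RaceWriteRange(unsafe.Pointer(&buf[0]), len(buf))
//	fillBuffer(buf) // hypothetical, uninstrumented writer
//	if runtime.RaceErrors() > 0 {
//		// At least one race report has been produced so far.
//	}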

// private interface for the runtime
const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is the return PC of the function that calls this function,
// pc is the start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
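
// An illustrative sketch of how the runtime uses these helpers, modeled on
// the channel-send path in chan.go (shown only for orientation, not part of
// this file):
//
//	if raceenabled {
//		raceReadObjectPC(c.elemtype, ep, callerpc, funcPC(chansend))
//	}
//
// callerpc comes from getcallerpc in the exported wrapper, and the final
// argument is the start PC of the runtime function performing the access,
// so both frames can appear in the race report.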

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

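// When the C race runtime produces a report, it calls back into Go to
// symbolize program counters and data addresses. The callback registered
// with __tsan_init is racecallbackthunk (race_amd64.s), which handles
// raceGetProcCmd directly and forwards the symbolize commands here.
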
// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	f := FuncForPC(ctx.pc)
	if f != nil {
		file, line := f.FileLine(ctx.pc)
		if line != 0 {
			ctx.fn = cfuncname(f.raw())
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = ctx.pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil {
		ctx.heap = 1
		ctx.start = uintptr(x)
		ctx.size = n
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(uintptr)
func racefuncexit()
func racereadrangepc1(uintptr, uintptr, uintptr)
func racewriterangepc1(uintptr, uintptr, uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f from the C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)

// checks if the address has shadow (i.e. heap or data/bss)
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by the race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
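	// round rounds the span up to whole pages (the shadow is mapped with
	// mmap, which is page-granular): for example, with 4 KiB pages a span
	// of 0x12345 bytes becomes size = 0x13000.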
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

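// racemapshadow asks the race runtime to map shadow memory for a newly
// mapped region of the heap arena; the memory allocator is expected to call
// it as the arena grows (its raceenabled hooks live in malloc.go). It also
// keeps racearenastart/racearenaend up to date for the range checks in
// isvalidaddr and race_amd64.s.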
//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

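// racegostart informs the race runtime that a new goroutine is being created
// and returns the race context to store in the new g's racectx field; pc
// identifies the creation site. When the call happens on g0, the spawning
// context is taken from the current user goroutine (m.curg), as the body
// below shows.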
//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}
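
// RaceAcquire, RaceRelease and RaceReleaseMerge establish happens-before
// edges for synchronization the detector cannot observe on its own: a
// RaceAcquire on an address synchronizes with prior RaceRelease (or
// RaceReleaseMerge) calls on the same address. A minimal sketch of
// annotating a hypothetical queue whose handoff uses primitives the
// detector does not model (q.signal and q.wait are hypothetical):
//
//	// producer
//	q.items = append(q.items, v)
//	runtime.RaceRelease(unsafe.Pointer(q))
//	q.signal() // hypothetical wakeup, invisible to the detector
//
//	// consumer, after q.wait() returns
//	runtime.RaceAcquire(unsafe.Pointer(q))
//	v := q.items[0]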
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}

// RaceDisable disables handling of race events in the current goroutine.
//go:nosplit
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

// RaceEnable re-enables handling of race events in the current goroutine.
//go:nosplit
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}
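
// Calls to RaceDisable and RaceEnable nest: raceignore counts the depth, so
// only the outermost RaceEnable resumes reporting. A minimal sketch of
// bracketing an intentionally racy read (checkStats is hypothetical):
//
//	runtime.RaceDisable()
//	v := checkStats() // racy by design; reports here would be noise
//	runtime.RaceEnable()
//	_ = v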