github.com/s1s1ty/go@v0.0.0-20180207192209-104445e3140f/src/runtime/race.go (about)

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build race
     6  
     7  package runtime
     8  
     9  import (
    10  	"unsafe"
    11  )
    12  
// Public race detection API, present iff built with -race.
//
// These four functions have no Go body: they are implemented outside Go
// (see the race_amd64.s references below) and call into the TSan runtime.

// RaceRead informs the race detector of a read at addr.
func RaceRead(addr unsafe.Pointer)

// RaceWrite informs the race detector of a write at addr.
func RaceWrite(addr unsafe.Pointer)

// RaceReadRange informs the race detector of a read of len bytes starting at addr.
func RaceReadRange(addr unsafe.Pointer, len int)

// RaceWriteRange informs the race detector of a write of len bytes starting at addr.
func RaceWriteRange(addr unsafe.Pointer, len int)
    19  
// RaceErrors returns the number of races reported so far, read from the
// TSan runtime's report counter.
func RaceErrors() int {
	var n uint64
	// __tsan_report_count writes the current report count into n.
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
    25  
//go:nosplit

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
func RaceAcquire(addr unsafe.Pointer) {
	// Delegates to the runtime-internal implementation, which is a no-op
	// for ignored goroutines and for addresses without shadow memory.
	raceacquire(addr)
}
    39  
//go:nosplit

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
func RaceRelease(addr unsafe.Pointer) {
	// Delegates to the runtime-internal implementation, which is a no-op
	// for ignored goroutines and for addresses without shadow memory.
	racerelease(addr)
}
    50  
//go:nosplit

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
func RaceReleaseMerge(addr unsafe.Pointer) {
	// Delegates to the runtime-internal implementation, which is a no-op
	// for ignored goroutines and for addresses without shadow memory.
	racereleasemerge(addr)
}
    61  
//go:nosplit

// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
func RaceDisable() {
	_g_ := getg()
	// Tell TSan to start ignoring sync events only on the outermost call;
	// nested calls just bump the per-goroutine nesting counter.
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}
    75  
//go:nosplit

// RaceEnable re-enables handling of race events in the current goroutine.
// TSan resumes observing sync events only when the outermost RaceDisable
// has been matched; assumes calls are balanced with RaceDisable (an
// unmatched RaceEnable would drive the counter negative — not checked here).
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}
    86  
// Private interface for the runtime.

// raceenabled is statically true here because this file is compiled only
// under the race build tag; the non-race build presumably supplies a
// counterpart declaring it false (not visible in this file).
const raceenabled = true
    90  
    91  // For all functions accepting callerpc and pc,
    92  // callerpc is a return PC of the function that calls this function,
    93  // pc is start PC of the function that calls this function.
    94  func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
    95  	kind := t.kind & kindMask
    96  	if kind == kindArray || kind == kindStruct {
    97  		// for composite objects we have to read every address
    98  		// because a write might happen to any subobject.
    99  		racereadrangepc(addr, t.size, callerpc, pc)
   100  	} else {
   101  		// for non-composite objects we can read just the start
   102  		// address, as any write must write the first byte.
   103  		racereadpc(addr, callerpc, pc)
   104  	}
   105  }
   106  
   107  func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
   108  	kind := t.kind & kindMask
   109  	if kind == kindArray || kind == kindStruct {
   110  		// for composite objects we have to write every address
   111  		// because a write might happen to any subobject.
   112  		racewriterangepc(addr, t.size, callerpc, pc)
   113  	} else {
   114  		// for non-composite objects we can write just the start
   115  		// address, as any write must write the first byte.
   116  		racewritepc(addr, callerpc, pc)
   117  	}
   118  }
   119  
// racereadpc and racewritepc report a single-address read/write with
// explicit caller PCs. No Go body: implemented outside Go (the TSan
// entry points __tsan_read_pc/__tsan_write_pc are imported below for
// race_amd64.s).

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
   125  
// symbolizeCodeContext is the argument block handed to raceSymbolizeCode
// via racecallback. It originates on the C side of the race runtime, so
// its field order presumably must match the C definition — do not reorder
// without checking the TSan sources.
type symbolizeCodeContext struct {
	pc   uintptr // in: program counter to symbolize
	fn   *byte   // out: NUL-terminated function name
	file *byte   // out: NUL-terminated file name
	line uintptr // out: line number
	off  uintptr // out: pc offset from function entry
	res  uintptr // out: set to 1 when the context has been filled in
}
   134  
// Placeholder strings reported when symbolization fails. They are
// NUL-terminated because their addresses are handed to the C race runtime.
var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

// Commands that the C race runtime can issue through racecallback.
const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)
   143  
// Callback from C into Go, runs on g0.
// cmd selects one of the race*Cmd operations; ctx points at the
// command-specific argument struct supplied by the C side.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		// raceGetProcCmd is answered directly by the assembly thunk
		// before control ever reaches this Go function.
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}
   157  
// raceSymbolizeCode resolves ctx.pc to a function name, file, line and
// entry offset for race-report output. When the PC cannot be resolved it
// falls back to the "??"/"-" placeholders so the report still prints.
// ctx.res is always set to 1: the C side gets an answer either way.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	f := FuncForPC(ctx.pc)
	if f != nil {
		file, line := f.FileLine(ctx.pc)
		if line != 0 {
			ctx.fn = cfuncname(f.funcInfo())
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = ctx.pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	// Fallback: unknown function/file, report the raw PC as the offset.
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
   177  
// symbolizeDataContext is the argument block handed to raceSymbolizeData
// via racecallback. It originates on the C side of the race runtime, so
// its field order presumably must match the C definition — do not reorder
// without checking the TSan sources.
type symbolizeDataContext struct {
	addr  uintptr // in: address to symbolize
	heap  uintptr // out: set to 1 if addr is a heap address
	start uintptr // out: base address of the containing object
	size  uintptr // out: size of the containing object
	name  *byte   // out: not filled in by raceSymbolizeData in this version
	file  *byte   // out: not filled in by raceSymbolizeData in this version
	line  uintptr // out: not filled in by raceSymbolizeData in this version
	res   uintptr // out: set to 1 when a heap object was found
}
   188  
// raceSymbolizeData resolves ctx.addr to the heap object containing it,
// filling in its base address and size. If addr is not inside a heap
// object, ctx.res stays 0, signalling "not found" to the C side.
func raceSymbolizeData(ctx *symbolizeDataContext) {
	if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil {
		ctx.heap = 1
		ctx.start = uintptr(x)
		ctx.size = n
		ctx.res = 1
	}
}
   197  
// Race runtime functions called via runtime·racecall.
//
// Each byte variable below stands in for the C symbol of the same name in
// the linked-in TSan runtime; its address is the entry point that racecall
// invokes (see e.g. racecall(&__tsan_init, ...) in raceinit).
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte
   246  
// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
// Set once by raceinit; read by isvalidaddr.
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
// Maintained by racemapshadow: start is fixed by the first mapping,
// end only grows.
var racearenastart uintptr
var racearenaend uintptr
   293  
// The declarations below have no Go bodies: they are implemented in
// assembly (race_amd64.s) and bridge between the Go runtime and TSan.

func racefuncenter(uintptr)
func racefuncexit()
func racereadrangepc1(uintptr, uintptr, uintptr)
func racewriterangepc1(uintptr, uintptr, uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f from C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
   303  
   304  // checks if the address has shadow (i.e. heap or data/bss)
   305  //go:nosplit
   306  func isvalidaddr(addr unsafe.Pointer) bool {
   307  	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
   308  		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
   309  }
   310  
//go:nosplit

// raceinit initializes the race detector for the process. It calls
// __tsan_init, registering racecallbackthunk as the C-to-Go callback,
// and maps shadow memory over the module's global data. The returned
// gctx/pctx are the contexts written back by __tsan_init (presumably the
// initial goroutine and proc race contexts — confirm against the TSan
// Go interface).
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	// Compute the smallest [start, end) interval covering all four data
	// sections (data, noptrdata, bss, noptrbss) of the first module.
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	// Record the shadowed data range for isvalidaddr.
	racedatastart = start
	racedataend = start + size

	return
}
   354  
// raceFiniLock serializes racefini; it is deliberately never unlocked
// (see the comment inside racefini).
var raceFiniLock mutex

//go:nosplit

// racefini shuts down the race runtime via __tsan_fini, which (per the
// comment below) ultimately calls C exit and does not return.
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}
   367  
//go:nosplit

// raceproccreate asks the TSan runtime for a new proc context and
// returns it.
func raceproccreate() uintptr {
	var ctx uintptr
	// __tsan_proc_create writes the new context into ctx.
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit

// raceprocdestroy releases a proc context previously obtained from
// raceproccreate.
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}
   379  
//go:nosplit

// racemapshadow maps race-detector shadow memory for the heap region
// [addr, addr+size) and widens the recorded arena bounds used by
// isvalidaddr and race_amd64.s.
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	// The first mapping fixes the arena start; later mappings can only
	// extend the recorded end. NOTE(review): this assumes subsequent heap
	// mappings never start below the first one — confirm against the
	// allocator's growth pattern.
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}
   390  
//go:nosplit

// racemalloc informs the race detector that [p, p+sz) was allocated.
// NOTE(review): the pointer/size go in the third/fourth argument slots
// here, unlike racefree — presumably matching the C signatures of
// __tsan_malloc vs __tsan_free; confirm against the TSan Go interface.
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit

// racefree informs the race detector that [p, p+sz) was freed.
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}
   400  
//go:nosplit

// racegostart records the start of a new goroutine spawned at pc and
// returns the race context to install in the new goroutine.
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	// Attribute the spawn to the user goroutine (m.curg) when running on a
	// system goroutine on its behalf; otherwise to the current goroutine.
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	// __tsan_go_start writes the child's race context into racectx.
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}
   415  
//go:nosplit

// racegoend records that the current goroutine is exiting.
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}
   420  
//go:nosplit

// racewriterangepc reports a write of sz bytes at addr. A non-zero callpc
// is pushed as a synthetic caller frame around the access so race reports
// show the instrumented call site.
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}
   437  
//go:nosplit

// racereadrangepc reports a read of sz bytes at addr. A non-zero callpc
// is pushed as a synthetic caller frame around the access so race reports
// show the instrumented call site.
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}
   454  
//go:nosplit

// raceacquire performs an acquire on addr attributed to the current
// goroutine.
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit

// raceacquireg performs an acquire on addr attributed to gp's race
// context. Note the subtlety: the raceignore check is on the *current*
// goroutine, while the event is credited to gp.
func raceacquireg(gp *g, addr unsafe.Pointer) {
	// Skip ignored goroutines and addresses without shadow memory.
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}
   467  
//go:nosplit

// racerelease performs a release on addr attributed to the current
// goroutine.
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit

// racereleaseg performs a release on addr attributed to gp's race
// context. As with raceacquireg, the raceignore check is on the
// *current* goroutine while the event is credited to gp.
func racereleaseg(gp *g, addr unsafe.Pointer) {
	// Skip ignored goroutines and addresses without shadow memory.
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}
   480  
//go:nosplit

// racereleasemerge performs a merged release on addr attributed to the
// current goroutine (see RaceReleaseMerge for semantics).
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit

// racereleasemergeg performs a merged release on addr attributed to gp's
// race context. As with raceacquireg, the raceignore check is on the
// *current* goroutine while the event is credited to gp.
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	// Skip ignored goroutines and addresses without shadow memory.
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}
   493  
//go:nosplit

// racefingo marks the current goroutine as the finalizer goroutine so
// TSan can treat its synchronization specially.
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}