github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

// Public race detection API, present iff built with -race.

package runtime

import (
	"unsafe"
)

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceSemacquire(s *uint32)
func RaceSemrelease(s *uint32)
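
// Illustrative usage (not part of the original file): these annotations let
// code outside the runtime describe memory accesses the race detector cannot
// observe directly, such as buffers filled by assembly or foreign code, and
// they exist only in -race builds. A minimal sketch, assuming a hypothetical
// importing package with an uninstrumented helper fillAsm:
//
//	import (
//		"runtime"
//		"unsafe"
//	)
//
//	func fill(p []byte) {
//		// Report the upcoming write of len(p) bytes, since fillAsm itself
//		// is not instrumented by the race detector.
//		runtime.RaceWriteRange(unsafe.Pointer(&p[0]), len(p))
//		fillAsm(&p[0], len(p)) // hypothetical assembly routine
//	}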

// private interface for the runtime
const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is a return PC of the function that calls this function,
// pc is start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
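
// Illustrative example (not part of the original file): for a composite value
// such as
//
//	type pair struct{ a, b int64 }
//
// raceWriteObjectPC reports the object's full t.size byte range, because a
// concurrent write to either field would race with it, whereas for a plain
// int64 it reports only the first byte at addr, which any conflicting write
// must also touch.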

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	f := FuncForPC(ctx.pc)
	if f != nil {
		file, line := f.FileLine(ctx.pc)
		if line != 0 {
			ctx.fn = cfuncname(f.raw())
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = ctx.pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
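
// Illustrative call sequence (not part of the original file): when the race
// runtime needs to symbolize a PC for a report, the callback is invoked
// roughly as follows (pcOfInterest is a hypothetical value supplied by the
// C side, which in reality enters through racecallbackthunk):
//
//	var ctx symbolizeCodeContext
//	ctx.pc = pcOfInterest
//	racecallback(raceSymbolizeCodeCmd, unsafe.Pointer(&ctx))
//	// On success ctx.res == 1, ctx.fn and ctx.file point to NUL-terminated
//	// strings, ctx.line holds the line number and ctx.off the PC offset
//	// from the function entry.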

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil {
		ctx.heap = 1
		ctx.start = uintptr(x)
		ctx.size = n
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(uintptr)
func racefuncexit()
func racereadrangepc1(uintptr, uintptr, uintptr)
func racewriterangepc1(uintptr, uintptr, uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f from C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)

// checks if the address has shadow (i.e. heap or data/bss)
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}
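
// Worked example (illustrative, assuming round rounds end-start up to a
// multiple of _PageSize and a 0x1000-byte page): with start = 0x522000 and
// end = 0x5a4c30, end-start = 0x82c30, so size = 0x83000 and __tsan_map_shadow
// covers [0x522000, 0x5a5000), which then becomes racedatastart/racedataend.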

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

//go:nosplit

// RaceAcquire establishes a happens-before relation with the preceding
// RaceRelease or RaceReleaseMerge on addr.
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

//go:nosplit

// RaceRelease performs a release operation on addr that can synchronize
// with a later RaceAcquire on addr.
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:nosplit

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
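
// Illustrative usage (not part of the original file): a hypothetical
// synchronization primitive that the race detector cannot see (for example
// one built on raw atomics plus futex-style waiting) can annotate its
// happens-before edges explicitly. A minimal sketch, assuming a -race build
// and a hypothetical lock type with uninstrumented lockImpl/unlockImpl:
//
//	func (l *lock) unlock() {
//		runtime.RaceRelease(unsafe.Pointer(&l.addr))
//		l.unlockImpl()
//	}
//
//	func (l *lock) lock() {
//		l.lockImpl()
//		runtime.RaceAcquire(unsafe.Pointer(&l.addr))
//	}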

//go:nosplit

// RaceDisable disables handling of race events in the current goroutine.
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

//go:nosplit

// RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}
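
// Illustrative usage (not part of the original file): RaceDisable/RaceEnable
// bracket a region whose accesses should not be reported, typically paired
// with explicit RaceAcquire/RaceRelease annotations for synchronization the
// detector can no longer see. A minimal sketch in a hypothetical caller:
//
//	runtime.RaceDisable()
//	touchSharedState() // hypothetical intentionally-unchecked accesses
//	runtime.RaceEnable()
//
// The calls nest: raceignore is incremented per RaceDisable, and race handling
// resumes only when a matching RaceEnable brings it back to zero.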