golang.org/toolchain@v0.0.1-go1.9rc2.windows-amd64/src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

// Public race detection API, present iff built with -race.

package runtime

import (
	"unsafe"
)

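// RaceRead, RaceWrite, RaceReadRange and RaceWriteRange report a read or a
// write of the memory at addr (len bytes for the Range variants) to the race
// detector. They are implemented in race_amd64.s.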
func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

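// RaceErrors returns the number of race reports produced so far by the race
// runtime (__tsan_report_count).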
func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}

// private interface for the runtime
const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is the return PC of the function that calls this function,
// and pc is the start PC of the function that calls this function.
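// For example, the map implementation passes its caller's return PC together
// with funcPC(mapaccess1), so that a race report attributes the access both
// to mapaccess1 and to the code that called it.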
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

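// symbolizeCodeContext is the argument block passed with raceSymbolizeCodeCmd.
// raceSymbolizeCode fills in fn, file, line and off for ctx.pc and sets res
// to 1 on success.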
type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

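// qq and dash are NUL-terminated placeholders reported when a PC cannot be
// symbolized.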
var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	f := FuncForPC(ctx.pc)
	if f != nil {
		file, line := f.FileLine(ctx.pc)
		if line != 0 {
			ctx.fn = cfuncname(f.funcInfo())
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = ctx.pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}

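// symbolizeDataContext is the argument block passed with raceSymbolizeDataCmd.
// raceSymbolizeData fills in the bounds of the heap object containing addr.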
type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil {
		ctx.heap = 1
		ctx.start = uintptr(x)
		ctx.size = n
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
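// Each is declared as a byte variable so that its address, which the linker
// resolves to the entry point of the corresponding __tsan_* function, can be
// passed to racecall.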
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
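// The cgo_import_static directives let the linker resolve these symbols
// against the race runtime object (runtime/race/race_*.syso) that is linked
// into race-enabled binaries.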
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

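// These functions have no Go bodies; they are implemented in race_amd64.s.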
func racefuncenter(uintptr)
func racefuncexit()
func racereadrangepc1(uintptr, uintptr, uintptr)
func racewriterangepc1(uintptr, uintptr, uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f from the C race runtime
// with up to 4 uintptr arguments.
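// For example:
//	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)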
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)

// checks if the address has shadow (i.e. heap or data/bss)
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by the race runtime.
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
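// raceproccreate creates a new race runtime proc context and returns it;
// raceprocdestroy releases a context created by raceproccreate.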
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
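// racemapshadow asks the race runtime to map shadow memory for a newly
// allocated heap region and records the heap bounds used by isvalidaddr.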
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
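// racegostart records the start of a new goroutine created at pc and returns
// the race context to be stored in the new g.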
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
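// raceacquire, racerelease and racereleasemerge establish happens-before
// edges on addr: racerelease publishes the current goroutine's synchronization
// state at addr, racereleasemerge merges it with the state already recorded
// there, and raceacquire imports the state recorded at addr.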
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

//go:nosplit
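// RaceAcquire establishes a happens-before edge with a preceding RaceRelease
// or RaceReleaseMerge on the same addr. It informs the race detector about
// synchronization that it cannot infer on its own.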
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

//go:nosplit
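// RaceRelease publishes the calling goroutine's happens-before state at addr,
// to be observed by a later RaceAcquire on the same addr.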
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:nosplit
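// RaceReleaseMerge is like RaceRelease, but merges the goroutine's state with
// whatever is already recorded at addr instead of replacing it.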
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}

//go:nosplit
// RaceDisable disables handling of race events in the current goroutine.
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

//go:nosplit
// RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}