github.com/liujq9674git/golang-src-1.7@v0.0.0-20230517174348-17f6ec47f3f8/src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

// Public race detection API, present iff built with -race.

package runtime

import (
	"unsafe"
)

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceSemacquire(s *uint32)
func RaceSemrelease(s *uint32)
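// The exported functions above are annotation hooks: in a -race build they
// report memory accesses and semaphore operations to the race detector on
// behalf of the calling goroutine, covering traffic the detector cannot see
// by itself. A purely illustrative sketch (modeled on how the syscall package
// annotates buffers exchanged with the kernel; buf and n are hypothetical):
//
//	// After a read(2)-style call fills buf[:n], report that this
//	// goroutine wrote the range; before a write(2)-style call, report
//	// the corresponding read of the buffer being sent.
//	RaceWriteRange(unsafe.Pointer(&buf[0]), n)
//	RaceReadRange(unsafe.Pointer(&buf[0]), n)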

// private interface for the runtime
const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is the return PC of the function that calls this function,
// and pc is the start PC of the function that calls this function.
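// An illustrative call site (paraphrasing the channel implementation, which
// reports a read of the value about to be sent; the identifiers here are
// stand-ins for the ones used in chan.go):
//
//	raceReadObjectPC(elemType, elemPtr, callerpc, funcPC(chansend))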
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

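// symbolizeCodeContext is the argument block handed to the code-symbolization
// callback by the C race runtime; raceSymbolizeCode fills it in. The field
// order and sizes are assumed to mirror the C-side struct, so do not
// rearrange them.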
type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

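// Commands passed by the C race runtime to racecallback. The values must stay
// in sync with the race runtime; raceGetProcCmd is handled entirely in
// assembly by racecallbackthunk and should never reach the Go handler.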
const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	f := findfunc(ctx.pc)
	if f == nil {
		ctx.fn = &qq[0]
		ctx.file = &dash[0]
		ctx.line = 0
		ctx.off = ctx.pc
		ctx.res = 1
		return
	}

	ctx.fn = cfuncname(f)
	file, line := funcline(f, ctx.pc)
	ctx.line = uintptr(line)
	ctx.file = &bytes(file)[0] // assume NUL-terminated
	ctx.off = ctx.pc - f.entry
	ctx.res = 1
	return
}

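// symbolizeDataContext is the argument block for the data-symbolization
// callback; raceSymbolizeData fills in heap-object information for addr. As
// with symbolizeCodeContext, the layout is assumed to mirror the C side.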
type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil {
		ctx.heap = 1
		ctx.start = uintptr(x)
		ctx.size = n
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

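// The declarations below are entry points implemented in assembly
// (race_amd64.s): racefuncenter/racefuncexit are emitted by the compiler
// around functions in a -race build, racereadrangepc1/racewriterangepc1 wrap
// the corresponding __tsan range calls, and racecallbackthunk is the C-to-Go
// trampoline registered with __tsan_init below.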
func racefuncenter(uintptr)
func racefuncexit()
func racereadrangepc1(uintptr, uintptr, uintptr)
func racewriterangepc1(uintptr, uintptr, uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f in the C race runtime
// with up to 4 uintptr arguments.
func racecall(*byte, uintptr, uintptr, uintptr, uintptr)

// isvalidaddr reports whether the address has shadow (i.e. is in the heap or data/bss).
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by the race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
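	// The scan below takes the minimum start and maximum end across the
	// module's noptrdata, data, noptrbss, and bss sections.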
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := round(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

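// racegostart is called when a new goroutine is started. It returns the race
// context for the new goroutine, attributing its creation to pc; the spawning
// context is taken from the current user goroutine when there is one,
// otherwise from the system goroutine itself.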
//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

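// raceacquire, racerelease, and racereleasemerge record synchronization on
// addr: a release (or release-merge) on addr followed by an acquire on the
// same addr gives the detector a happens-before edge between the two
// goroutines. The channel implementation, for example, uses these to model
// unbuffered-channel handoffs. The *g variants attribute the event to an
// explicit goroutine rather than the current one.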
//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

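// RaceAcquire establishes a happens-before relation with a preceding
// RaceRelease or RaceReleaseMerge on addr, analogous to an acquire operation
// in the C/C++ memory model.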
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

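// RaceRelease performs a release operation on addr that a later RaceAcquire
// on the same address can synchronize with.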
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

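// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.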
//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}

// RaceDisable disables handling of race events in the current goroutine.
//go:nosplit
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

// RaceEnable re-enables handling of race events in the current goroutine.
//go:nosplit
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}