github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

package runtime

import (
	"github.com/x04/go/src/unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)
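
// A hedged usage sketch (illustrative, not part of this file): user code that
// lets C fill a Go buffer can annotate the access itself, since the compiler
// cannot instrument the external write. fillViaC is a hypothetical cgo call.
//
//	buf := make([]byte, 64)
//	runtime.RaceWriteRange(unsafe.Pointer(&buf[0]), len(buf)) // report this goroutine's write of buf
//	fillViaC(buf)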

func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
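
// A minimal sketch (assumption, not part of this file): a harness built with
// -race can poll the error count before exiting and fail loudly.
//
//	if n := runtime.RaceErrors(); n > 0 {
//		log.Fatalf("race detector reported %d errors", n)
//	}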

//go:nosplit

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}
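
// A hedged pairing sketch (illustrative names): a hand-rolled handoff hides
// its synchronization from the detector, so user code annotates it. slot is a
// hypothetical variable serving as the annotation address.
//
//	// producer, after publishing through the custom primitive:
//	runtime.RaceRelease(unsafe.Pointer(&slot))
//	// consumer, after observing the publication:
//	runtime.RaceAcquire(unsafe.Pointer(&slot))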

//go:nosplit

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:nosplit

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
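
// A hedged sketch of when the merge variant matters (illustrative): several
// producers annotate the same address. RaceReleaseMerge chains with the
// preceding releases instead of replacing them, so a single consumer-side
// RaceAcquire observes a happens-before edge from every producer.
//
//	runtime.RaceReleaseMerge(unsafe.Pointer(&slot)) // in each producer
//	runtime.RaceAcquire(unsafe.Pointer(&slot))      // in the consumer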

//go:nosplit

// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

//go:nosplit

// RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}
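
// A hedged usage sketch (illustrative): bracketing a region whose internal
// synchronization should not be treated as sync events. The calls nest, so
// every RaceDisable must be balanced by a RaceEnable.
//
//	runtime.RaceDisable()
//	doHandRolledSync() // hypothetical; its sync events are ignored here
//	runtime.RaceEnable()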

// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is a return PC of the function that calls this function,
// pc is start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
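
// For illustration (hedged; paraphrased from how upstream runtime callers of
// this era look): a channel send treats the outgoing element as a read by the
// sender, passing its own return PC and start PC so reports name the caller.
//
//	if raceenabled {
//		raceReadObjectPC(c.elemtype, ep, callerpc, funcPC(chansend))
//	}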

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	f := fi._Func()
	if f != nil {
		file, line := f.FileLine(pc)
		if line != 0 {
			if inldata := funcdata(fi, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				for {
					ix := pcdatavalue(fi, _PCDATA_InlTreeIndex, pc, nil)
					if ix >= 0 {
						if inltree[ix].funcID == funcID_wrapper {
							// ignore wrappers
							// Back up to an instruction in the "caller".
							pc = f.Entry() + uintptr(inltree[ix].parentPc)
							continue
						}
						ctx.pc = f.Entry() + uintptr(inltree[ix].parentPc) // "caller" pc
						ctx.fn = cfuncnameFromNameoff(fi, inltree[ix].func_)
						ctx.line = uintptr(line)
						ctx.file = &bytes(file)[0] // assume NUL-terminated
						ctx.off = pc - f.Entry()
						ctx.res = 1
						return
					}
					break
				}
			}
			ctx.fn = cfuncname(fi)
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
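
// A hedged sketch of the caller-side protocol described above (the real loop
// lives in the C race runtime, not in this file): call once per frame,
// re-calling while ctx.pc keeps moving into a containing function.
//
//	for {
//		prev := ctx.pc
//		raceSymbolizeCode(ctx) // emits one (possibly inlined) frame
//		if ctx.pc == prev {
//			break // pc unchanged: not inlined, outermost frame reached
//		}
//	}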

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function f from C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// checks if the address has shadow (i.e. heap or data/bss)
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
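
// A hedged user-level illustration (not part of this file): with these
// wrappers in place, a -race build both performs the atomic operation and
// records it as a synchronization event, so this publish/consume pair is
// reported race-free.
//
//	var ready uint32
//	var data int
//
//	// writer:
//	data = 42
//	atomic.StoreUint32(&ready, 1)
//
//	// reader:
//	if atomic.LoadUint32(&ready) == 1 {
//		_ = data // happens-after the store above
//	}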