github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

package runtime

import (
	"unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

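// These annotations let user code report memory accesses that instrumented
// builds cannot observe on their own. A hypothetical sketch of calling code
// (deviceWrite is an assumed helper, not part of this file):
//
//	func writeBuf(p unsafe.Pointer, n int) {
//		runtime.RaceWriteRange(p, n) // report the n-byte write to the detector
//		deviceWrite(p, n)            // access invisible to instrumentation
//	}
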
func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}

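// RaceErrors reports how many races the detector has seen so far, which lets
// a caller bracket a suspect section. A hypothetical sketch (assumes a -race
// build that keeps running after reporting, which is the default behavior;
// doRacyThing is an assumed helper):
//
//	before := runtime.RaceErrors()
//	doRacyThing()
//	if runtime.RaceErrors() > before {
//		t.Fatal("data race detected")
//	}
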
// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
//
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
//
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
//
//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}

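// A typical pairing (hypothetical sketch; sendInvisible and recvInvisible
// stand in for a synchronization channel the detector cannot observe):
//
//	// goroutine A
//	data = 42
//	runtime.RaceRelease(unsafe.Pointer(&data))
//	sendInvisible(&data)
//
//	// goroutine B
//	p := recvInvisible()
//	runtime.RaceAcquire(unsafe.Pointer(p))
//	_ = *p // now ordered after A's write in the detector's view
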
// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
//
//go:nosplit
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

// RaceEnable re-enables handling of race events in the current goroutine.
//
//go:nosplit
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}

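// Because the calls nest via the raceignore counter, only the outermost
// RaceEnable resumes handling. A hypothetical sketch (doInvisibleSync is an
// assumed helper):
//
//	runtime.RaceDisable()
//	runtime.RaceDisable() // nested: raceignore == 2
//	doInvisibleSync()     // synchronization events here are ignored
//	runtime.RaceEnable()  // raceignore == 1, still disabled
//	runtime.RaceEnable()  // raceignore == 0, handling resumes
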
// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is a return PC of the function that calls this function,
// pc is the start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}

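// Why ranges for composites (illustrative note; the variable is an assumed
// example): given var s struct{ a, b int64 }, instrumenting only the first
// byte at &s would never conflict with a concurrent access to s.b, whose
// shadow cells are separate; the range calls above cover every byte of the
// object, so a racing access to any subobject is detected.
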
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	f := fi._Func()
	if f != nil {
		file, line := f.FileLine(pc)
		if line != 0 {
			if inldata := funcdata(fi, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				for {
					ix := pcdatavalue(fi, _PCDATA_InlTreeIndex, pc, nil)
					if ix >= 0 {
						if inltree[ix].funcID == funcID_wrapper {
							// ignore wrappers
							// Back up to an instruction in the "caller".
							pc = f.Entry() + uintptr(inltree[ix].parentPc)
							continue
						}
						ctx.pc = f.Entry() + uintptr(inltree[ix].parentPc) // "caller" pc
						ctx.fn = cfuncnameFromNameoff(fi, inltree[ix].func_)
						ctx.line = uintptr(line)
						ctx.file = &bytes(file)[0] // assume NUL-terminated
						ctx.off = pc - f.Entry()
						ctx.res = 1
						return
					}
					break
				}
			}
			ctx.fn = cfuncname(fi)
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from the C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// isvalidaddr reports whether the address has shadow (i.e. is in the heap or data/bss).
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by the race runtime.
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit, which has
	// undefined behavior if called more than once. If the lock is
	// already held, it's assumed that the first caller exits the program,
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

//go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)