github.com/lovishpuri/go-40569/src@v0.0.0-20230519171745-f8623e7c56cf/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

package runtime

import (
	"internal/abi"
	"unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
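
// A minimal usage sketch (hypothetical, not part of this file): a test
// harness might snapshot RaceErrors around a suspect operation and fail
// if the count grew. doSuspectWork is a placeholder name.
//
//	before := runtime.RaceErrors()
//	doSuspectWork()
//	if runtime.RaceErrors() > before {
//		panic("data race detected")
//	}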

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
//
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
//
//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
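
// A hedged sketch of the intended pairing (hypothetical example): code that
// synchronizes through a mechanism the detector cannot see can annotate the
// two sides manually. publish and waitForSignal are placeholder names.
//
//	// Publisher: write, then release before the out-of-band signal.
//	data = 42
//	runtime.RaceRelease(unsafe.Pointer(&data))
//	publish()
//
//	// Consumer: acquire after the signal, before reading.
//	waitForSignal()
//	runtime.RaceAcquire(unsafe.Pointer(&data))
//	_ = data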

// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
//
//go:nosplit
func RaceDisable() {
	gp := getg()
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
	}
	gp.raceignore++
}

// RaceEnable re-enables handling of race events in the current goroutine.
//
//go:nosplit
func RaceEnable() {
	gp := getg()
	gp.raceignore--
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
	}
}
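
// A minimal sketch (hypothetical example): synchronization events inside the
// Disable/Enable window are hidden from the detector, so callers typically
// restore ordering knowledge with RaceAcquire/RaceRelease at the boundaries.
// doLockFreeTricks is a placeholder name.
//
//	runtime.RaceDisable()
//	doLockFreeTricks()
//	runtime.RaceEnable()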

// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is the return PC of the function that calls this function,
// and pc is the start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.Size_, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.Size_, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}

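// racereadpc and racewritepc record a read/write of addr, attributed to the
// function starting at pc and called from callpc. Both are implemented in
// assembly in this package.
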
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

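// NUL-terminated placeholders reported when symbolization fails.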
var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

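// Commands passed from the C race runtime to racecallback. raceGetProcCmd
// is normally handled directly by racecallbackthunk in assembly.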
const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	if fi.valid() {
		u, uf := newInlineUnwinder(fi, pc, nil)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper {
				// ignore wrappers
				continue
			}

			name := sf.name()
			file, line := u.fileLine(uf)
			if line == 0 {
				// Failure to symbolize
				continue
			}
			ctx.fn = &bytes(name)[0] // assume NUL-terminated
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - fi.entry()
			ctx.res = 1
			if u.isInlined(uf) {
				// Set ctx.pc to the "caller" so the race detector calls this again
				// to further unwind.
				uf = u.next(uf)
				ctx.pc = uf.pc
			}
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

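// raceSymbolizeData reads ctx.addr and, if it points into a heap object,
// reports the object's base address and size back to the race detector.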
func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from the C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// isvalidaddr reports whether the address has race shadow memory,
// i.e. it points into the heap or into data/bss.
//
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

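// raceinit initializes the race detector runtime and maps shadow memory for
// the data segment. It returns the race context for the initial goroutine
// and the initial proc context.
//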
//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	lockInit(&raceFiniLock, lockRankRaceFini)

	// On most machines, cgo is required to initialize libc, which is used by race runtime.
	if !iscgo && GOOS != "darwin" {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

//go:nosplit
func racefini() {
	// racefini can be called only once to avoid races.
	// It eventually (via __tsan_fini) calls C.exit, which has
	// undefined behavior if called more than once. If the lock is
	// already held, it's assumed that the first caller exits the program,
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)

	// __tsan_fini will run C atexit functions and C++ destructors,
	// which can theoretically call back into Go.
	// Tell the scheduler we're entering external code.
	entersyscall()

	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)

	racecall(&__tsan_fini, 0, 0, 0, 0)
}

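// raceproccreate creates a new race detector proc context and returns it;
// raceprocdestroy below releases it. The runtime keeps one proc context per P.
//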
//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

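// racemapshadow maps race detector shadow memory for a newly allocated heap
// region [addr, addr+size) and widens the recorded arena bounds.
//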
//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

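// racegostart registers a goroutine started at pc and returns the race
// context to assign to it. The parent is the current user goroutine when
// there is one, otherwise the current g itself.
//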
//go:nosplit
func racegostart(pc uintptr) uintptr {
	gp := getg()
	var spawng *g
	if gp.m.curg != nil {
		spawng = gp.m.curg
	} else {
		spawng = gp
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

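// raceacquireg is like raceacquire, but performs the acquire on behalf of
// goroutine gp, which need not be the current goroutine.
//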
//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

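// racereleaseacquire performs a combined release and acquire operation
// on addr in a single call to the race runtime.
//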
//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

//go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

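// racefingo tells the race detector that the current goroutine is the
// finalizer goroutine.
//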
//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)