github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/ssa/writebarrier.go

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package ssa
     6  
     7  import (
     8  	"github.com/bir3/gocompiler/src/cmd/compile/internal/reflectdata"
     9  	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
    10  	"github.com/bir3/gocompiler/src/cmd/internal/obj"
    11  	"github.com/bir3/gocompiler/src/cmd/internal/objabi"
    12  	"github.com/bir3/gocompiler/src/cmd/internal/src"
    13  	"fmt"
    14  )
    15  
    16  // A ZeroRegion records parts of an object which are known to be zero.
    17  // A ZeroRegion only applies to a single memory state.
    18  // Each bit in mask is set if the corresponding pointer-sized word of
    19  // the base object is known to be zero.
    20  // In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
    21  // is known to be zero.
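         // For example (illustrative): a freshly allocated three-word object p whose
         // middle word has since been written to would be tracked as
         //
         //	ZeroRegion{base: p, mask: 0b101}
         //
         // meaning words 0 and 2 of p are still known to be zero.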
    22  type ZeroRegion struct {
    23  	base *Value
    24  	mask uint64
    25  }
    26  
     27  // needwb reports whether we need a write barrier for store op v.
    28  // v must be Store/Move/Zero.
    29  // zeroes provides known zero information (keyed by ID of memory-type values).
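         // As a summary of the cases below: the barrier is omitted when the destination
         // is a stack address, when a Move copies read-only data into a freshly
         // allocated object, and when a Store writes a non-heap pointer into a word of
         // a fresh object that is still known to be zero (per zeroes).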
    30  func needwb(v *Value, zeroes map[ID]ZeroRegion, select1 []*Value) bool {
    31  	t, ok := v.Aux.(*types.Type)
    32  	if !ok {
    33  		v.Fatalf("store aux is not a type: %s", v.LongString())
    34  	}
    35  	if !t.HasPointers() {
    36  		return false
    37  	}
    38  	if IsStackAddr(v.Args[0]) {
    39  		return false // write on stack doesn't need write barrier
    40  	}
    41  	if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) {
    42  		if mem, ok := IsNewObject(v.Args[0], select1); ok && mem == v.MemoryArg() {
    43  			// Copying data from readonly memory into a fresh object doesn't need a write barrier.
    44  			return false
    45  		}
    46  	}
    47  	if v.Op == OpStore && IsGlobalAddr(v.Args[1]) {
    48  		// Storing pointers to non-heap locations into zeroed memory doesn't need a write barrier.
    49  		ptr := v.Args[0]
    50  		var off int64
    51  		size := v.Aux.(*types.Type).Size()
    52  		for ptr.Op == OpOffPtr {
    53  			off += ptr.AuxInt
    54  			ptr = ptr.Args[0]
    55  		}
    56  		ptrSize := v.Block.Func.Config.PtrSize
    57  		if off%ptrSize != 0 || size%ptrSize != 0 {
    58  			v.Fatalf("unaligned pointer write")
    59  		}
    60  		if off < 0 || off+size > 64*ptrSize {
    61  			// write goes off end of tracked offsets
    62  			return true
    63  		}
    64  		z := zeroes[v.MemoryArg().ID]
    65  		if ptr != z.base {
    66  			return true
    67  		}
    68  		for i := off; i < off+size; i += ptrSize {
    69  			if z.mask>>uint(i/ptrSize)&1 == 0 {
    70  				return true // not known to be zero
    71  			}
    72  		}
    73  		// All written locations are known to be zero - write barrier not needed.
    74  		return false
    75  	}
    76  	return true
    77  }
    78  
     79  // writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
     80  // when necessary (the condition computed by needwb above). It rewrites such
     81  // store ops into branches and runtime calls, like
    82  //
    83  //	if writeBarrier.enabled {
    84  //		gcWriteBarrier(ptr, val)	// Not a regular Go call
    85  //	} else {
    86  //		*ptr = val
    87  //	}
    88  //
    89  // A sequence of WB stores for many pointer fields of a single type will
    90  // be emitted together, with a single branch.
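         // For example (an illustrative sketch), two barriered stores to fields of
         // the same object in one block are emitted under a single test:
         //
         //	if writeBarrier.enabled {
         //		gcWriteBarrier(&p.x, vx)
         //		gcWriteBarrier(&p.y, vy)
         //	} else {
         //		p.x = vx
         //		p.y = vy
         //	}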
    91  func writebarrier(f *Func) {
    92  	if !f.fe.UseWriteBarrier() {
    93  		return
    94  	}
    95  
    96  	var sb, sp, wbaddr, const0 *Value
    97  	var typedmemmove, typedmemclr, gcWriteBarrier *obj.LSym
    98  	var stores, after []*Value
    99  	var sset *sparseSet
   100  	var storeNumber []int32
   101  
    102  	// Compute a map from a value to the SelectN [1] value that uses it.
   103  	select1 := f.Cache.allocValueSlice(f.NumValues())
   104  	defer func() { f.Cache.freeValueSlice(select1) }()
   105  	for _, b := range f.Blocks {
   106  		for _, v := range b.Values {
   107  			if v.Op != OpSelectN {
   108  				continue
   109  			}
   110  			if v.AuxInt != 1 {
   111  				continue
   112  			}
   113  			select1[v.Args[0].ID] = v
   114  		}
   115  	}
   116  
   117  	zeroes := f.computeZeroMap(select1)
   118  	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
    119  		// First, identify all the stores that need a write barrier. Mark them
    120  		// with WB ops temporarily and record the presence of WB ops.
   121  		nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
   122  		for _, v := range b.Values {
   123  			switch v.Op {
   124  			case OpStore, OpMove, OpZero:
   125  				if needwb(v, zeroes, select1) {
   126  					switch v.Op {
   127  					case OpStore:
   128  						v.Op = OpStoreWB
   129  					case OpMove:
   130  						v.Op = OpMoveWB
   131  					case OpZero:
   132  						v.Op = OpZeroWB
   133  					}
   134  					nWBops++
   135  				}
   136  			}
   137  		}
   138  		if nWBops == 0 {
   139  			continue
   140  		}
   141  
   142  		if wbaddr == nil {
   143  			// lazily initialize global values for write barrier test and calls
   144  			// find SB and SP values in entry block
   145  			initpos := f.Entry.Pos
   146  			sp, sb = f.spSb()
   147  			wbsym := f.fe.Syslook("writeBarrier")
   148  			wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
   149  			gcWriteBarrier = f.fe.Syslook("gcWriteBarrier")
   150  			typedmemmove = f.fe.Syslook("typedmemmove")
   151  			typedmemclr = f.fe.Syslook("typedmemclr")
   152  			const0 = f.ConstInt32(f.Config.Types.UInt32, 0)
   153  
   154  			// allocate auxiliary data structures for computing store order
   155  			sset = f.newSparseSet(f.NumValues())
   156  			defer f.retSparseSet(sset)
   157  			storeNumber = f.Cache.allocInt32Slice(f.NumValues())
   158  			defer f.Cache.freeInt32Slice(storeNumber)
   159  		}
   160  
   161  		// order values in store order
   162  		b.Values = storeOrder(b.Values, sset, storeNumber)
   163  
   164  		firstSplit := true
   165  	again:
   166  		// find the start and end of the last contiguous WB store sequence.
   167  		// a branch will be inserted there. values after it will be moved
   168  		// to a new block.
   169  		var last *Value
   170  		var start, end int
   171  		values := b.Values
   172  	FindSeq:
   173  		for i := len(values) - 1; i >= 0; i-- {
   174  			w := values[i]
   175  			switch w.Op {
   176  			case OpStoreWB, OpMoveWB, OpZeroWB:
   177  				start = i
   178  				if last == nil {
   179  					last = w
   180  					end = i + 1
   181  				}
   182  			case OpVarDef, OpVarLive:
   183  				continue
   184  			default:
   185  				if last == nil {
   186  					continue
   187  				}
   188  				break FindSeq
   189  			}
   190  		}
   191  		stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
   192  		after = append(after[:0], b.Values[end:]...)
   193  		b.Values = b.Values[:start]
   194  
   195  		// find the memory before the WB stores
   196  		mem := stores[0].MemoryArg()
   197  		pos := stores[0].Pos
   198  		bThen := f.NewBlock(BlockPlain)
   199  		bElse := f.NewBlock(BlockPlain)
   200  		bEnd := f.NewBlock(b.Kind)
   201  		bThen.Pos = pos
   202  		bElse.Pos = pos
   203  		bEnd.Pos = b.Pos
   204  		b.Pos = pos
   205  
   206  		// set up control flow for end block
   207  		bEnd.CopyControls(b)
   208  		bEnd.Likely = b.Likely
   209  		for _, e := range b.Succs {
   210  			bEnd.Succs = append(bEnd.Succs, e)
   211  			e.b.Preds[e.i].b = bEnd
   212  		}
   213  
   214  		// set up control flow for write barrier test
   215  		// load word, test word, avoiding partial register write from load byte.
   216  		cfgtypes := &f.Config.Types
   217  		flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
   218  		flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
   219  		b.Kind = BlockIf
   220  		b.SetControl(flag)
   221  		b.Likely = BranchUnlikely
   222  		b.Succs = b.Succs[:0]
   223  		b.AddEdgeTo(bThen)
   224  		b.AddEdgeTo(bElse)
   225  		// TODO: For OpStoreWB and the buffered write barrier,
   226  		// we could move the write out of the write barrier,
   227  		// which would lead to fewer branches. We could do
   228  		// something similar to OpZeroWB, since the runtime
   229  		// could provide just the barrier half and then we
   230  		// could unconditionally do an OpZero (which could
   231  		// also generate better zeroing code). OpMoveWB is
   232  		// trickier and would require changing how
   233  		// cgoCheckMemmove works.
   234  		bThen.AddEdgeTo(bEnd)
   235  		bElse.AddEdgeTo(bEnd)
   236  
   237  		// for each write barrier store, append write barrier version to bThen
   238  		// and simple store version to bElse
   239  		memThen := mem
   240  		memElse := mem
   241  
   242  		// If the source of a MoveWB is volatile (will be clobbered by a
   243  		// function call), we need to copy it to a temporary location, as
   244  		// marshaling the args of typedmemmove might clobber the value we're
   245  		// trying to move.
    246  		// Look for volatile sources and copy them to temporaries before we
    247  		// emit any call.
    248  		// There is unlikely to be more than one of them, so just do a linear
    249  		// search instead of using a map.
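         		// (In this pass a volatile source is an address derived from SP,
         		// e.g. into the outgoing argument area; see isVolatile below.)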
   250  		type volatileCopy struct {
   251  			src *Value // address of original volatile value
   252  			tmp *Value // address of temporary we've copied the volatile value into
   253  		}
   254  		var volatiles []volatileCopy
   255  	copyLoop:
   256  		for _, w := range stores {
   257  			if w.Op == OpMoveWB {
   258  				val := w.Args[1]
   259  				if isVolatile(val) {
   260  					for _, c := range volatiles {
   261  						if val == c.src {
   262  							continue copyLoop // already copied
   263  						}
   264  					}
   265  
   266  					t := val.Type.Elem()
   267  					tmp := f.fe.Auto(w.Pos, t)
   268  					memThen = bThen.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, memThen)
   269  					tmpaddr := bThen.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, memThen)
   270  					siz := t.Size()
   271  					memThen = bThen.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, memThen)
   272  					memThen.Aux = t
   273  					volatiles = append(volatiles, volatileCopy{val, tmpaddr})
   274  				}
   275  			}
   276  		}
   277  
   278  		for _, w := range stores {
   279  			ptr := w.Args[0]
   280  			pos := w.Pos
   281  
   282  			var fn *obj.LSym
   283  			var typ *obj.LSym
   284  			var val *Value
   285  			switch w.Op {
   286  			case OpStoreWB:
   287  				val = w.Args[1]
   288  				nWBops--
   289  			case OpMoveWB:
   290  				fn = typedmemmove
   291  				val = w.Args[1]
   292  				typ = reflectdata.TypeLinksym(w.Aux.(*types.Type))
   293  				nWBops--
   294  			case OpZeroWB:
   295  				fn = typedmemclr
   296  				typ = reflectdata.TypeLinksym(w.Aux.(*types.Type))
   297  				nWBops--
   298  			case OpVarDef, OpVarLive:
   299  			}
   300  
   301  			// then block: emit write barrier call
   302  			switch w.Op {
   303  			case OpStoreWB, OpMoveWB, OpZeroWB:
   304  				if w.Op == OpStoreWB {
   305  					memThen = bThen.NewValue3A(pos, OpWB, types.TypeMem, gcWriteBarrier, ptr, val, memThen)
   306  				} else {
   307  					srcval := val
   308  					if w.Op == OpMoveWB && isVolatile(srcval) {
   309  						for _, c := range volatiles {
   310  							if srcval == c.src {
   311  								srcval = c.tmp
   312  								break
   313  							}
   314  						}
   315  					}
   316  					memThen = wbcall(pos, bThen, fn, typ, ptr, srcval, memThen, sp, sb)
   317  				}
    318  				// Record that we set up a write barrier function call.
   319  				f.fe.SetWBPos(pos)
   320  			case OpVarDef, OpVarLive:
   321  				memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen)
   322  			}
   323  
   324  			// else block: normal store
   325  			switch w.Op {
   326  			case OpStoreWB:
   327  				memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
   328  			case OpMoveWB:
   329  				memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
   330  				memElse.Aux = w.Aux
   331  			case OpZeroWB:
   332  				memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
   333  				memElse.Aux = w.Aux
   334  			case OpVarDef, OpVarLive:
   335  				memElse = bElse.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memElse)
   336  			}
   337  		}
   338  
   339  		// merge memory
   340  		// Splice memory Phi into the last memory of the original sequence,
   341  		// which may be used in subsequent blocks. Other memories in the
   342  		// sequence must be dead after this block since there can be only
    343  		// one live memory value.
   344  		bEnd.Values = append(bEnd.Values, last)
   345  		last.Block = bEnd
   346  		last.reset(OpPhi)
   347  		last.Pos = last.Pos.WithNotStmt()
   348  		last.Type = types.TypeMem
   349  		last.AddArg(memThen)
   350  		last.AddArg(memElse)
   351  		for _, w := range stores {
   352  			if w != last {
   353  				w.resetArgs()
   354  			}
   355  		}
   356  		for _, w := range stores {
   357  			if w != last {
   358  				f.freeValue(w)
   359  			}
   360  		}
   361  
   362  		// put values after the store sequence into the end block
   363  		bEnd.Values = append(bEnd.Values, after...)
   364  		for _, w := range after {
   365  			w.Block = bEnd
   366  		}
   367  
   368  		// Preemption is unsafe between loading the write
   369  		// barrier-enabled flag and performing the write
   370  		// because that would allow a GC phase transition,
   371  		// which would invalidate the flag. Remember the
   372  		// conditional block so liveness analysis can disable
   373  		// safe-points. This is somewhat subtle because we're
   374  		// splitting b bottom-up.
   375  		if firstSplit {
   376  			// Add b itself.
   377  			b.Func.WBLoads = append(b.Func.WBLoads, b)
   378  			firstSplit = false
   379  		} else {
   380  			// We've already split b, so we just pushed a
   381  			// write barrier test into bEnd.
   382  			b.Func.WBLoads = append(b.Func.WBLoads, bEnd)
   383  		}
   384  
   385  		// if we have more stores in this block, do this block again
   386  		if nWBops > 0 {
   387  			goto again
   388  		}
   389  	}
   390  }
   391  
    392  // computeZeroMap returns a map from the ID of a memory value to
    393  // the set of locations that are known to be zero in that memory state.
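         // For example (illustrative, assuming a 64-bit target): if the memory produced
         // by newobject maps to ZeroRegion{base: p, mask: 0b111} and a later value is
         //
         //	v = Store {*T} (OffPtr [8] p) q mem
         //
         // then the memory produced by v maps to ZeroRegion{base: p, mask: 0b101}.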
   394  func (f *Func) computeZeroMap(select1 []*Value) map[ID]ZeroRegion {
   395  
   396  	ptrSize := f.Config.PtrSize
   397  	// Keep track of which parts of memory are known to be zero.
   398  	// This helps with removing write barriers for various initialization patterns.
   399  	// This analysis is conservative. We only keep track, for each memory state, of
   400  	// which of the first 64 words of a single object are known to be zero.
   401  	zeroes := map[ID]ZeroRegion{}
   402  	// Find new objects.
   403  	for _, b := range f.Blocks {
   404  		for _, v := range b.Values {
   405  			if mem, ok := IsNewObject(v, select1); ok {
   406  				// While compiling package runtime itself, we might see user
   407  				// calls to newobject, which will have result type
   408  				// unsafe.Pointer instead. We can't easily infer how large the
   409  				// allocated memory is, so just skip it.
   410  				if types.LocalPkg.Path == "runtime" && v.Type.IsUnsafePtr() {
   411  					continue
   412  				}
   413  
   414  				nptr := v.Type.Elem().Size() / ptrSize
   415  				if nptr > 64 {
   416  					nptr = 64
   417  				}
   418  				zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
   419  			}
   420  		}
   421  	}
   422  	// Find stores to those new objects.
   423  	for {
   424  		changed := false
   425  		for _, b := range f.Blocks {
   426  			// Note: iterating forwards helps convergence, as values are
   427  			// typically (but not always!) in store order.
   428  			for _, v := range b.Values {
   429  				if v.Op != OpStore {
   430  					continue
   431  				}
   432  				z, ok := zeroes[v.MemoryArg().ID]
   433  				if !ok {
   434  					continue
   435  				}
   436  				ptr := v.Args[0]
   437  				var off int64
   438  				size := v.Aux.(*types.Type).Size()
   439  				for ptr.Op == OpOffPtr {
   440  					off += ptr.AuxInt
   441  					ptr = ptr.Args[0]
   442  				}
   443  				if ptr != z.base {
   444  					// Different base object - we don't know anything.
   445  					// We could even be writing to the base object we know
   446  					// about, but through an aliased but offset pointer.
    447  				// So we have to throw away all the zero information we have.
   448  					continue
   449  				}
   450  				// Round to cover any partially written pointer slots.
   451  				// Pointer writes should never be unaligned like this, but non-pointer
   452  				// writes to pointer-containing types will do this.
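         				// (For example, with ptrSize 8, a 1-byte store at off 9 is
         				// widened to off 8, size 8, i.e. all of word 1.)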
   453  				if d := off % ptrSize; d != 0 {
   454  					off -= d
   455  					size += d
   456  				}
   457  				if d := size % ptrSize; d != 0 {
   458  					size += ptrSize - d
   459  				}
   460  				// Clip to the 64 words that we track.
   461  				min := off
   462  				max := off + size
   463  				if min < 0 {
   464  					min = 0
   465  				}
   466  				if max > 64*ptrSize {
   467  					max = 64 * ptrSize
   468  				}
   469  				// Clear bits for parts that we are writing (and hence
   470  				// will no longer necessarily be zero).
   471  				for i := min; i < max; i += ptrSize {
   472  					bit := i / ptrSize
   473  					z.mask &^= 1 << uint(bit)
   474  				}
   475  				if z.mask == 0 {
   476  					// No more known zeros - don't bother keeping.
   477  					continue
   478  				}
   479  				// Save updated known zero contents for new store.
   480  				if zeroes[v.ID] != z {
   481  					zeroes[v.ID] = z
   482  					changed = true
   483  				}
   484  			}
   485  		}
   486  		if !changed {
   487  			break
   488  		}
   489  	}
   490  	if f.pass.debug > 0 {
   491  		fmt.Printf("func %s\n", f.Name)
   492  		for mem, z := range zeroes {
   493  			fmt.Printf("  memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
   494  		}
   495  	}
   496  	return zeroes
   497  }
   498  
    499  // wbcall emits a write barrier runtime call in b and returns the resulting memory.
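         // In this pass fn is either typedmemmove(typ, dst, src) or typedmemclr(typ, ptr);
         // for typedmemclr, val is nil and only typ and ptr are passed.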
   500  func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value) *Value {
   501  	config := b.Func.Config
   502  
   503  	var wbargs []*Value
   504  	// TODO (register args) this is a bit of a hack.
   505  	inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3
   506  
   507  	// put arguments on stack
   508  	off := config.ctxt.Arch.FixedFrameSize
   509  
   510  	var argTypes []*types.Type
   511  	if typ != nil { // for typedmemmove
   512  		taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
   513  		argTypes = append(argTypes, b.Func.Config.Types.Uintptr)
   514  		off = round(off, taddr.Type.Alignment())
   515  		if inRegs {
   516  			wbargs = append(wbargs, taddr)
   517  		} else {
   518  			arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
   519  			mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
   520  		}
   521  		off += taddr.Type.Size()
   522  	}
   523  
   524  	argTypes = append(argTypes, ptr.Type)
   525  	off = round(off, ptr.Type.Alignment())
   526  	if inRegs {
   527  		wbargs = append(wbargs, ptr)
   528  	} else {
   529  		arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
   530  		mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
   531  	}
   532  	off += ptr.Type.Size()
   533  
   534  	if val != nil {
   535  		argTypes = append(argTypes, val.Type)
   536  		off = round(off, val.Type.Alignment())
   537  		if inRegs {
   538  			wbargs = append(wbargs, val)
   539  		} else {
   540  			arg := b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
   541  			mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
   542  		}
   543  		off += val.Type.Size()
   544  	}
   545  	off = round(off, config.PtrSize)
   546  	wbargs = append(wbargs, mem)
   547  
   548  	// issue call
   549  	call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil)))
   550  	call.AddArgs(wbargs...)
   551  	call.AuxInt = off - config.ctxt.Arch.FixedFrameSize
   552  	return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
   553  }
   554  
    555  // round rounds o up to a multiple of r; r must be a power of 2.
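         // For example, round(40, 8) == 40 and round(41, 8) == 48.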
   556  func round(o int64, r int64) int64 {
   557  	return (o + r - 1) &^ (r - 1)
   558  }
   559  
   560  // IsStackAddr reports whether v is known to be an address of a stack slot.
   561  func IsStackAddr(v *Value) bool {
   562  	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
   563  		v = v.Args[0]
   564  	}
   565  	switch v.Op {
   566  	case OpSP, OpLocalAddr, OpSelectNAddr, OpGetCallerSP:
   567  		return true
   568  	}
   569  	return false
   570  }
   571  
   572  // IsGlobalAddr reports whether v is known to be an address of a global (or nil).
   573  func IsGlobalAddr(v *Value) bool {
   574  	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
   575  		v = v.Args[0]
   576  	}
   577  	if v.Op == OpAddr && v.Args[0].Op == OpSB {
   578  		return true // address of a global
   579  	}
   580  	if v.Op == OpConstNil {
   581  		return true
   582  	}
   583  	if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
   584  		return true // loading from a read-only global - the resulting address can't be a heap address.
   585  	}
   586  	return false
   587  }
   588  
   589  // IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
   590  func IsReadOnlyGlobalAddr(v *Value) bool {
   591  	if v.Op == OpConstNil {
   592  		// Nil pointers are read only. See issue 33438.
   593  		return true
   594  	}
   595  	if v.Op == OpAddr && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
   596  		return true
   597  	}
   598  	return false
   599  }
   600  
    601  // IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object.
    602  // If so, it also returns the memory state mem at which v is zero.
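         // Two shapes are recognized (described informally): with register-based call
         // results, v is SelectN [0] of the runtime.newobject call and mem is the
         // matching SelectN [1]; with stack-based results, v is a Load of the call's
         // stack result slot and mem is the call's SelectN memory result.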
   603  func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) {
   604  	f := v.Block.Func
   605  	c := f.Config
   606  	if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
   607  		if v.Op != OpSelectN || v.AuxInt != 0 {
   608  			return nil, false
   609  		}
   610  		mem = select1[v.Args[0].ID]
   611  		if mem == nil {
   612  			return nil, false
   613  		}
   614  	} else {
   615  		if v.Op != OpLoad {
   616  			return nil, false
   617  		}
   618  		mem = v.MemoryArg()
   619  		if mem.Op != OpSelectN {
   620  			return nil, false
   621  		}
   622  		if mem.Type != types.TypeMem {
   623  			return nil, false
   624  		} // assume it is the right selection if true
   625  	}
   626  	call := mem.Args[0]
   627  	if call.Op != OpStaticCall {
   628  		return nil, false
   629  	}
   630  	if !isSameCall(call.Aux, "runtime.newobject") {
   631  		return nil, false
   632  	}
   633  	if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
   634  		if v.Args[0] == call {
   635  			return mem, true
   636  		}
   637  		return nil, false
   638  	}
   639  	if v.Args[0].Op != OpOffPtr {
   640  		return nil, false
   641  	}
   642  	if v.Args[0].Args[0].Op != OpSP {
   643  		return nil, false
   644  	}
   645  	if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value
   646  		return nil, false
   647  	}
   648  	return mem, true
   649  }
   650  
   651  // IsSanitizerSafeAddr reports whether v is known to be an address
   652  // that doesn't need instrumentation.
   653  func IsSanitizerSafeAddr(v *Value) bool {
   654  	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
   655  		v = v.Args[0]
   656  	}
   657  	switch v.Op {
   658  	case OpSP, OpLocalAddr, OpSelectNAddr:
   659  		// Stack addresses are always safe.
   660  		return true
   661  	case OpITab, OpStringPtr, OpGetClosurePtr:
   662  		// Itabs, string data, and closure fields are
   663  		// read-only once initialized.
   664  		return true
   665  	case OpAddr:
   666  		vt := v.Aux.(*obj.LSym).Type
   667  		return vt == objabi.SRODATA || vt == objabi.SLIBFUZZER_8BIT_COUNTER || vt == objabi.SCOVERAGE_COUNTER || vt == objabi.SCOVERAGE_AUXVAR
   668  	}
   669  	return false
   670  }
   671  
    672  // isVolatile reports whether v is a pointer to the argument region on the stack,
    673  // which will be clobbered by a function call.
   674  func isVolatile(v *Value) bool {
   675  	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
   676  		v = v.Args[0]
   677  	}
   678  	return v.Op == OpSP
   679  }