github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/cmd/compile/internal/ssa/writebarrier.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/src"
)

// needwb reports whether the store op v needs a write barrier.
// v must be Store/Move/Zero.
func needwb(v *Value) bool {
	t, ok := v.Aux.(*types.Type)
	if !ok {
		v.Fatalf("store aux is not a type: %s", v.LongString())
	}
	if !t.HasHeapPointer() {
		return false
	}
	if IsStackAddr(v.Args[0]) {
		return false // write on stack doesn't need write barrier
	}
	return true
}

// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
// when necessary (the condition above). It rewrites store ops to branches
// and runtime calls, like
//
// if writeBarrier.enabled {
//   writebarrierptr(ptr, val)
// } else {
//   *ptr = val
// }
//
// A sequence of WB stores for many pointer fields of a single type will
// be emitted together, with a single branch.
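//
// Move and Zero stores are handled the same way, except that the call in
// the barrier branch is typedmemmove or typedmemclr respectively, and the
// non-barrier branch keeps the plain Move or Zero op.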
func writebarrier(f *Func) {
	if !f.fe.UseWriteBarrier() {
		return
	}

	var sb, sp, wbaddr, const0 *Value
	var writebarrierptr, typedmemmove, typedmemclr *obj.LSym
	var stores, after []*Value
	var sset *sparseSet
	var storeNumber []int32

	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
		// first, identify all the stores that need a write barrier.
		// mark them with WB ops temporarily, and record how many there are.
		nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
		for _, v := range b.Values {
			switch v.Op {
			case OpStore, OpMove, OpZero:
				if needwb(v) {
					switch v.Op {
					case OpStore:
						v.Op = OpStoreWB
					case OpMove:
						v.Op = OpMoveWB
					case OpZero:
						v.Op = OpZeroWB
					}
					nWBops++
				}
			}
		}
		if nWBops == 0 {
			continue
		}

		if wbaddr == nil {
			// lazily initialize global values for write barrier test and calls
			// find SB and SP values in entry block
			initpos := f.Entry.Pos
			for _, v := range f.Entry.Values {
				if v.Op == OpSB {
					sb = v
				}
				if v.Op == OpSP {
					sp = v
				}
				if sb != nil && sp != nil {
					break
				}
			}
			if sb == nil {
				sb = f.Entry.NewValue0(initpos, OpSB, f.Config.Types.Uintptr)
			}
			if sp == nil {
				sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr)
			}
			wbsym := f.fe.Syslook("writeBarrier")
			wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
			writebarrierptr = f.fe.Syslook("writebarrierptr")
			typedmemmove = f.fe.Syslook("typedmemmove")
			typedmemclr = f.fe.Syslook("typedmemclr")
			const0 = f.ConstInt32(initpos, f.Config.Types.UInt32, 0)

			// allocate auxiliary data structures for computing store order
			sset = f.newSparseSet(f.NumValues())
			defer f.retSparseSet(sset)
			storeNumber = make([]int32, f.NumValues())
		}

		// order values in store order
		b.Values = storeOrder(b.Values, sset, storeNumber)

	again:
		// find the start and end of the last contiguous WB store sequence.
		// a branch will be inserted there. values after it will be moved
		// to a new block.
		var last *Value
		var start, end int
		values := b.Values
	FindSeq:
		for i := len(values) - 1; i >= 0; i-- {
			w := values[i]
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				start = i
				if last == nil {
					last = w
					end = i + 1
				}
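			// variable lifetime markers are allowed within the sequence;
			// they are duplicated into both branches below.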
			case OpVarDef, OpVarLive, OpVarKill:
				continue
			default:
				if last == nil {
					continue
				}
				break FindSeq
			}
		}
		stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
		after = append(after[:0], b.Values[end:]...)
		b.Values = b.Values[:start]

		// find the memory before the WB stores
		mem := stores[0].MemoryArg()
		pos := stores[0].Pos
		bThen := f.NewBlock(BlockPlain)
		bElse := f.NewBlock(BlockPlain)
		bEnd := f.NewBlock(b.Kind)
		bThen.Pos = pos
		bElse.Pos = pos
		bEnd.Pos = b.Pos
		b.Pos = pos

		// set up control flow for end block
		bEnd.SetControl(b.Control)
		bEnd.Likely = b.Likely
		for _, e := range b.Succs {
			bEnd.Succs = append(bEnd.Succs, e)
			e.b.Preds[e.i].b = bEnd
		}

		// set up control flow for write barrier test
		// load word, test word, avoiding partial register write from load byte.
		cfgtypes := &f.Config.Types
		flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
		flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
		b.Kind = BlockIf
		b.SetControl(flag)
		b.Likely = BranchUnlikely
		b.Succs = b.Succs[:0]
		b.AddEdgeTo(bThen)
		b.AddEdgeTo(bElse)
		bThen.AddEdgeTo(bEnd)
		bElse.AddEdgeTo(bEnd)
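		// b now ends in a conditional branch on the writeBarrier flag:
		// bThen will hold the runtime calls, bElse the plain stores,
		// and bEnd rejoins the original control flow.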

		// for each write barrier store, append write barrier version to bThen
		// and simple store version to bElse
		memThen := mem
		memElse := mem
		for _, w := range stores {
			ptr := w.Args[0]
			pos := w.Pos

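			// select the runtime helper and call arguments for this kind of store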
			var fn *obj.LSym
			var typ *obj.LSym
			var val *Value
			switch w.Op {
			case OpStoreWB:
				fn = writebarrierptr
				val = w.Args[1]
				nWBops--
			case OpMoveWB:
				fn = typedmemmove
				val = w.Args[1]
				typ = w.Aux.(*types.Type).Symbol()
				nWBops--
			case OpZeroWB:
				fn = typedmemclr
				typ = w.Aux.(*types.Type).Symbol()
				nWBops--
			case OpVarDef, OpVarLive, OpVarKill:
			}

			// then block: emit write barrier call
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				volatile := w.Op == OpMoveWB && isVolatile(val)
				memThen = wbcall(pos, bThen, fn, typ, ptr, val, memThen, sp, sb, volatile)
			case OpVarDef, OpVarLive, OpVarKill:
				memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen)
			}

			// else block: normal store
			switch w.Op {
			case OpStoreWB:
				memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
			case OpMoveWB:
				memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
				memElse.Aux = w.Aux
			case OpZeroWB:
				memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
				memElse.Aux = w.Aux
			case OpVarDef, OpVarLive, OpVarKill:
				memElse = bElse.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memElse)
			}

			if fn != nil {
				// Note that we set up a writebarrier function call.
				if !f.WBPos.IsKnown() {
					f.WBPos = pos
				}
				if f.fe.Debug_wb() {
					f.Warnl(pos, "write barrier")
				}
			}
		}

		// merge memory
		// Splice memory Phi into the last memory of the original sequence,
		// which may be used in subsequent blocks. Other memories in the
		// sequence must be dead after this block since there can be only
		// one memory live.
		bEnd.Values = append(bEnd.Values, last)
		last.Block = bEnd
		last.reset(OpPhi)
		last.Type = types.TypeMem
		last.AddArg(memThen)
		last.AddArg(memElse)
		for _, w := range stores {
			if w != last {
				w.resetArgs()
			}
		}
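		// free the dead stores only in a second pass, after all of their
		// argument references have been dropped above.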
		for _, w := range stores {
			if w != last {
				f.freeValue(w)
			}
		}

		// put values after the store sequence into the end block
		bEnd.Values = append(bEnd.Values, after...)
		for _, w := range after {
			w.Block = bEnd
		}

		// if we have more stores in this block, do this block again
		if nWBops > 0 {
			goto again
		}
	}
}

// wbcall emits a write barrier runtime call in b and returns the resulting memory.
// If valIsVolatile, it moves val into temp space before making the call.
func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value, valIsVolatile bool) *Value {
	config := b.Func.Config

	var tmp GCNode
	if valIsVolatile {
		// Copy to temp location if the source is volatile (will be clobbered by
		// a function call). Marshaling the args to typedmemmove might clobber the
		// value we're trying to move.
		t := val.Type.ElemType()
		tmp = b.Func.fe.Auto(val.Pos, t)
		mem = b.NewValue1A(pos, OpVarDef, types.TypeMem, tmp, mem)
		tmpaddr := b.NewValue1A(pos, OpAddr, t.PtrTo(), tmp, sp)
		siz := t.Size()
		mem = b.NewValue3I(pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem)
		mem.Aux = t
		val = tmpaddr
	}

	// put arguments on stack
	off := config.ctxt.FixedFrameSize()
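	// Arguments are stored at increasing, properly aligned offsets from SP,
	// starting just past the fixed frame area, following the Go stack-based
	// calling convention.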

	if typ != nil { // for typedmemmove
		taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
		off = round(off, taddr.Type.Alignment())
		arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
		off += taddr.Type.Size()
	}

	off = round(off, ptr.Type.Alignment())
	arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
	mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
	off += ptr.Type.Size()

	if val != nil {
		off = round(off, val.Type.Alignment())
		arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
		off += val.Type.Size()
	}
	off = round(off, config.PtrSize)

	// issue call
	mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, fn, mem)
	mem.AuxInt = off - config.ctxt.FixedFrameSize()
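	// AuxInt records the number of bytes of stack arguments passed to the call.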

	if valIsVolatile {
		mem = b.NewValue1A(pos, OpVarKill, types.TypeMem, tmp, mem) // mark temp dead
	}

	return mem
}

// round rounds o up to a multiple of r; r must be a power of 2.
func round(o int64, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

// IsStackAddr reports whether v is known to be the address of a stack slot.
func IsStackAddr(v *Value) bool {
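	// strip pointer arithmetic and copies to find the base pointer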
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP:
		return true
	case OpAddr:
		return v.Args[0].Op == OpSP
	}
	return false
}

// isVolatile reports whether v is a pointer to the argument region on the stack,
// which will be clobbered by a function call.
func isVolatile(v *Value) bool {
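	// as in IsStackAddr, strip pointer arithmetic to find the base pointer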
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	return v.Op == OpSP
}