github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/cmd/compile/internal/ssa/writebarrier.go (about) 1 // Copyright 2016 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package ssa 6 7 import ( 8 "cmd/compile/internal/types" 9 "cmd/internal/obj" 10 "cmd/internal/src" 11 "strings" 12 ) 13 14 // needwb reports whether we need write barrier for store op v. 15 // v must be Store/Move/Zero. 16 func needwb(v *Value) bool { 17 t, ok := v.Aux.(*types.Type) 18 if !ok { 19 v.Fatalf("store aux is not a type: %s", v.LongString()) 20 } 21 if !t.HasHeapPointer() { 22 return false 23 } 24 if IsStackAddr(v.Args[0]) { 25 return false // write on stack doesn't need write barrier 26 } 27 if v.Op == OpStore && IsGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) { 28 // Storing pointers to non-heap locations into a fresh object doesn't need a write barrier. 29 return false 30 } 31 if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) { 32 // Copying data from readonly memory into a fresh object doesn't need a write barrier. 33 return false 34 } 35 return true 36 } 37 38 // writebarrier pass inserts write barriers for store ops (Store, Move, Zero) 39 // when necessary (the condition above). It rewrites store ops to branches 40 // and runtime calls, like 41 // 42 // if writeBarrier.enabled { 43 // gcWriteBarrier(ptr, val) // Not a regular Go call 44 // } else { 45 // *ptr = val 46 // } 47 // 48 // A sequence of WB stores for many pointer fields of a single type will 49 // be emitted together, with a single branch. 
func writebarrier(f *Func) {
	if !f.fe.UseWriteBarrier() {
		return
	}

	// Lazily-initialized, shared across all blocks of f.
	var sb, sp, wbaddr, const0 *Value
	var typedmemmove, typedmemclr, gcWriteBarrier *obj.LSym
	var stores, after []*Value
	var sset *sparseSet
	var storeNumber []int32

	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
		// first, identify all the stores that need to insert a write barrier.
		// mark them with WB ops temporarily. record presence of WB ops.
		nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
		for _, v := range b.Values {
			switch v.Op {
			case OpStore, OpMove, OpZero:
				if needwb(v) {
					switch v.Op {
					case OpStore:
						v.Op = OpStoreWB
					case OpMove:
						v.Op = OpMoveWB
					case OpZero:
						v.Op = OpZeroWB
					}
					nWBops++
				}
			}
		}
		if nWBops == 0 {
			continue
		}

		if wbaddr == nil {
			// lazily initialize global values for write barrier test and calls
			// find SB and SP values in entry block
			initpos := f.Entry.Pos
			for _, v := range f.Entry.Values {
				if v.Op == OpSB {
					sb = v
				}
				if v.Op == OpSP {
					sp = v
				}
				if sb != nil && sp != nil {
					break
				}
			}
			if sb == nil {
				sb = f.Entry.NewValue0(initpos, OpSB, f.Config.Types.Uintptr)
			}
			if sp == nil {
				sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr)
			}
			wbsym := f.fe.Syslook("writeBarrier")
			wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
			gcWriteBarrier = f.fe.Syslook("gcWriteBarrier")
			typedmemmove = f.fe.Syslook("typedmemmove")
			typedmemclr = f.fe.Syslook("typedmemclr")
			const0 = f.ConstInt32(f.Config.Types.UInt32, 0)

			// allocate auxiliary data structures for computing store order
			sset = f.newSparseSet(f.NumValues())
			defer f.retSparseSet(sset)
			storeNumber = make([]int32, f.NumValues())
		}

		// order values in store order
		b.Values = storeOrder(b.Values, sset, storeNumber)

		firstSplit := true
	again:
		// find the start and end of the last contiguous WB store sequence.
		// a branch will be inserted there. values after it will be moved
		// to a new block.
		var last *Value
		var start, end int
		values := b.Values
	FindSeq:
		for i := len(values) - 1; i >= 0; i-- {
			w := values[i]
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				start = i
				if last == nil {
					last = w
					end = i + 1
				}
			case OpVarDef, OpVarLive, OpVarKill:
				// Liveness markers may be interleaved with the WB
				// store sequence; they don't break contiguity.
				continue
			default:
				if last == nil {
					continue
				}
				break FindSeq
			}
		}
		stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
		after = append(after[:0], b.Values[end:]...)
		b.Values = b.Values[:start]

		// find the memory before the WB stores
		mem := stores[0].MemoryArg()
		pos := stores[0].Pos
		bThen := f.NewBlock(BlockPlain)
		bElse := f.NewBlock(BlockPlain)
		// bEnd takes over b's original kind; b itself becomes the If
		// block for the write barrier test below.
		bEnd := f.NewBlock(b.Kind)
		bThen.Pos = pos
		bElse.Pos = pos
		bEnd.Pos = b.Pos
		b.Pos = pos

		// set up control flow for end block
		bEnd.SetControl(b.Control)
		bEnd.Likely = b.Likely
		for _, e := range b.Succs {
			bEnd.Succs = append(bEnd.Succs, e)
			e.b.Preds[e.i].b = bEnd
		}

		// set up control flow for write barrier test
		// load word, test word, avoiding partial register write from load byte.
		cfgtypes := &f.Config.Types
		flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
		flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
		b.Kind = BlockIf
		b.SetControl(flag)
		b.Likely = BranchUnlikely
		b.Succs = b.Succs[:0]
		b.AddEdgeTo(bThen)
		b.AddEdgeTo(bElse)
		// TODO: For OpStoreWB and the buffered write barrier,
		// we could move the write out of the write barrier,
		// which would lead to fewer branches. We could do
		// something similar to OpZeroWB, since the runtime
		// could provide just the barrier half and then we
		// could unconditionally do an OpZero (which could
		// also generate better zeroing code). OpMoveWB is
		// trickier and would require changing how
		// cgoCheckMemmove works.
		bThen.AddEdgeTo(bEnd)
		bElse.AddEdgeTo(bEnd)

		// for each write barrier store, append write barrier version to bThen
		// and simple store version to bElse
		memThen := mem
		memElse := mem
		for _, w := range stores {
			ptr := w.Args[0]
			pos := w.Pos

			var fn *obj.LSym
			var typ *obj.LSym
			var val *Value
			switch w.Op {
			case OpStoreWB:
				val = w.Args[1]
				nWBops--
			case OpMoveWB:
				fn = typedmemmove
				val = w.Args[1]
				typ = w.Aux.(*types.Type).Symbol()
				nWBops--
			case OpZeroWB:
				fn = typedmemclr
				typ = w.Aux.(*types.Type).Symbol()
				nWBops--
			case OpVarDef, OpVarLive, OpVarKill:
				// handled below: replayed into both arms.
			}

			// then block: emit write barrier call
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				volatile := w.Op == OpMoveWB && isVolatile(val)
				if w.Op == OpStoreWB {
					memThen = bThen.NewValue3A(pos, OpWB, types.TypeMem, gcWriteBarrier, ptr, val, memThen)
				} else {
					memThen = wbcall(pos, bThen, fn, typ, ptr, val, memThen, sp, sb, volatile)
				}
				// Note that we set up a writebarrier function call.
				f.fe.SetWBPos(pos)
			case OpVarDef, OpVarLive, OpVarKill:
				memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen)
			}

			// else block: normal store
			switch w.Op {
			case OpStoreWB:
				memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
			case OpMoveWB:
				memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
				memElse.Aux = w.Aux
			case OpZeroWB:
				memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
				memElse.Aux = w.Aux
			case OpVarDef, OpVarLive, OpVarKill:
				memElse = bElse.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memElse)
			}
		}

		// merge memory
		// Splice memory Phi into the last memory of the original sequence,
		// which may be used in subsequent blocks. Other memories in the
		// sequence must be dead after this block since there can be only
		// one memory live.
		bEnd.Values = append(bEnd.Values, last)
		last.Block = bEnd
		// Reuse last's Value ID so existing uses pick up the Phi.
		last.reset(OpPhi)
		last.Type = types.TypeMem
		last.AddArg(memThen)
		last.AddArg(memElse)
		for _, w := range stores {
			if w != last {
				w.resetArgs()
			}
		}
		// Free in a second pass: resetArgs must run on all stores first so
		// no freed value still has uses recorded.
		for _, w := range stores {
			if w != last {
				f.freeValue(w)
			}
		}

		// put values after the store sequence into the end block
		bEnd.Values = append(bEnd.Values, after...)
		for _, w := range after {
			w.Block = bEnd
		}

		// Preemption is unsafe between loading the write
		// barrier-enabled flag and performing the write
		// because that would allow a GC phase transition,
		// which would invalidate the flag. Remember the
		// conditional block so liveness analysis can disable
		// safe-points. This is somewhat subtle because we're
		// splitting b bottom-up.
		if firstSplit {
			// Add b itself.
			b.Func.WBLoads = append(b.Func.WBLoads, b)
			firstSplit = false
		} else {
			// We've already split b, so we just pushed a
			// write barrier test into bEnd.
			b.Func.WBLoads = append(b.Func.WBLoads, bEnd)
		}

		// if we have more stores in this block, do this block again
		if nWBops > 0 {
			goto again
		}
	}
}

// wbcall emits write barrier runtime call in b, returns memory.
// if valIsVolatile, it moves val into temp space before making the call.
// Arguments are marshaled on the stack starting at the fixed frame size
// (see the OpOffPtr/OpStore sequence below).
func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value, valIsVolatile bool) *Value {
	config := b.Func.Config

	var tmp GCNode
	if valIsVolatile {
		// Copy to temp location if the source is volatile (will be clobbered by
		// a function call). Marshaling the args to typedmemmove might clobber the
		// value we're trying to move.
		t := val.Type.Elem()
		tmp = b.Func.fe.Auto(val.Pos, t)
		mem = b.NewValue1A(pos, OpVarDef, types.TypeMem, tmp, mem)
		tmpaddr := b.NewValue2A(pos, OpLocalAddr, t.PtrTo(), tmp, sp, mem)
		siz := t.Size()
		mem = b.NewValue3I(pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem)
		mem.Aux = t
		val = tmpaddr
	}

	// put arguments on stack
	off := config.ctxt.FixedFrameSize()

	if typ != nil { // for typedmemmove
		taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
		off = round(off, taddr.Type.Alignment())
		arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
		off += taddr.Type.Size()
	}

	off = round(off, ptr.Type.Alignment())
	arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
	mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
	off += ptr.Type.Size()

	if val != nil {
		off = round(off, val.Type.Alignment())
		arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
		off += val.Type.Size()
	}
	off = round(off, config.PtrSize)

	// issue call
	mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, fn, mem)
	mem.AuxInt = off - config.ctxt.FixedFrameSize() // argument size for the call

	if valIsVolatile {
		mem = b.NewValue1A(pos, OpVarKill, types.TypeMem, tmp, mem) // mark temp dead
	}

	return mem
}

// round to a multiple of r, r is a power of 2
func round(o int64, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

// IsStackAddr reports whether v is known to be an address of a stack slot.
func IsStackAddr(v *Value) bool {
	// Strip address arithmetic and copies to reach the base address.
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP, OpLocalAddr:
		return true
	}
	return false
}

// IsGlobalAddr reports whether v is known to be an address of a global.
func IsGlobalAddr(v *Value) bool {
	return v.Op == OpAddr && v.Args[0].Op == OpSB
}

// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
func IsReadOnlyGlobalAddr(v *Value) bool {
	if !IsGlobalAddr(v) {
		return false
	}
	// See TODO in OpAddr case in IsSanitizerSafeAddr below.
	return strings.HasPrefix(v.Aux.(*obj.LSym).Name, `"".statictmp_`)
}

// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem.
// TODO: Be more precise. We really want "IsNilPointer" for the particular field in question.
// Right now, we can only detect a new object before any writes have been done to it.
// We could ignore non-pointer writes, writes to offsets which
// are known not to overlap the write in question, etc.
395 func IsNewObject(v *Value, mem *Value) bool { 396 if v.Op != OpLoad { 397 return false 398 } 399 if v.MemoryArg() != mem { 400 return false 401 } 402 if mem.Op != OpStaticCall { 403 return false 404 } 405 if !isSameSym(mem.Aux, "runtime.newobject") { 406 return false 407 } 408 if v.Args[0].Op != OpOffPtr { 409 return false 410 } 411 if v.Args[0].Args[0].Op != OpSP { 412 return false 413 } 414 c := v.Block.Func.Config 415 if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value 416 return false 417 } 418 return true 419 } 420 421 // IsSanitizerSafeAddr reports whether v is known to be an address 422 // that doesn't need instrumentation. 423 func IsSanitizerSafeAddr(v *Value) bool { 424 for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { 425 v = v.Args[0] 426 } 427 switch v.Op { 428 case OpSP, OpLocalAddr: 429 // Stack addresses are always safe. 430 return true 431 case OpITab, OpStringPtr, OpGetClosurePtr: 432 // Itabs, string data, and closure fields are 433 // read-only once initialized. 434 return true 435 case OpAddr: 436 sym := v.Aux.(*obj.LSym) 437 // TODO(mdempsky): Find a cleaner way to 438 // detect this. It would be nice if we could 439 // test sym.Type==objabi.SRODATA, but we don't 440 // initialize sym.Type until after function 441 // compilation. 442 if strings.HasPrefix(sym.Name, `"".statictmp_`) { 443 return true 444 } 445 } 446 return false 447 } 448 449 // isVolatile reports whether v is a pointer to argument region on stack which 450 // will be clobbered by a function call. 451 func isVolatile(v *Value) bool { 452 for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { 453 v = v.Args[0] 454 } 455 return v.Op == OpSP 456 }