// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"fmt"
	"strconv"
	"strings"
)

// Run analysis on minimal sets of mutually recursive functions
// or single non-recursive functions, bottom up.
//
// Finding these sets is finding strongly connected components
// by reverse topological order in the static call graph.
// The algorithm (known as Tarjan's algorithm) for doing that is taken from
// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
//
// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
// root of a connected component. Refusing to use it as a root
// forces it into the component of the function in which it appears.
// This is more convenient for escape analysis.
//
// Second, each function becomes two virtual nodes in the graph,
// with numbers n and n+1. We record the function's node number as n
// but search from node n+1. If the search tells us that the component
// number (min) is n+1, we know that this is a trivial component: one function
// plus its closures. If the search tells us that the component number is
// n, then there was a path from node n+1 back to node n, meaning that
// the function set is mutually recursive. The escape analysis can be
// more precise when analyzing a single non-recursive function than
// when analyzing a set of mutually recursive functions.

type bottomUpVisitor struct {
	analyze  func([]*Node, bool)
	visitgen uint32
	nodeID   map[*Node]uint32
	stack    []*Node
}

// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
// on the list or functions that have been passed in previous invocations of
// analyze. Closures appear in the same list as their outer functions.
// The lists are as short as possible while preserving those requirements.
// (In a typical program, many invocations of analyze will be passed just
// a single function.) The boolean argument 'recursive' passed to analyze
// specifies whether the functions on the list are mutually recursive.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
	var v bottomUpVisitor
	v.analyze = analyze
	v.nodeID = make(map[*Node]uint32)
	for _, n := range list {
		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
			v.visit(n)
		}
	}
}

func (v *bottomUpVisitor) visit(n *Node) uint32 {
	if id := v.nodeID[n]; id > 0 {
		// already visited
		return id
	}

	v.visitgen++
	id := v.visitgen
	v.nodeID[n] = id
	v.visitgen++
	min := v.visitgen

	v.stack = append(v.stack, n)
	min = v.visitcodelist(n.Nbody, min)
	if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
		// This node is the root of a strongly connected component.

		// The original min passed to visitcodelist was v.nodeID[n]+1.
		// If visitcodelist found its way back to v.nodeID[n], then this
		// block is a set of mutually recursive functions.
		// Otherwise it's just a lone function that does not recurse.
		recursive := min == id

		// Remove connected component from stack.
		// Mark walkgen so that future visits return a large number
		// so as not to affect the caller's min.

		var i int
		for i = len(v.stack) - 1; i >= 0; i-- {
			x := v.stack[i]
			if x == n {
				break
			}
			v.nodeID[x] = ^uint32(0)
		}
		v.nodeID[n] = ^uint32(0)
		block := v.stack[i:]
		// Run escape analysis on this set of functions.
		v.stack = v.stack[:i]
		v.analyze(block, recursive)
	}

	return min
}
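// As an illustrative sketch (not from this file; the names even and odd are
// invented for the example), consider the kind of source that ends up in a
// single analyze group:
//
//	func even(n int) bool {
//		if n == 0 {
//			return true
//		}
//		return odd(n - 1)
//	}
//
//	func odd(n int) bool {
//		if n == 0 {
//			return false
//		}
//		return even(n - 1)
//	}
//
// even and odd call each other, so they form one strongly connected
// component: a visit started from either one finds a path back to it, min
// comes back equal to id, and visitBottomUp passes both functions to
// analyze in a single call with recursive=true.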
func (v *bottomUpVisitor) visitcodelist(l Nodes, min uint32) uint32 {
	for _, n := range l.Slice() {
		min = v.visitcode(n, min)
	}
	return min
}

func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
	if n == nil {
		return min
	}

	min = v.visitcodelist(n.Ninit, min)
	min = v.visitcode(n.Left, min)
	min = v.visitcode(n.Right, min)
	min = v.visitcodelist(n.List, min)
	min = v.visitcodelist(n.Nbody, min)
	min = v.visitcodelist(n.Rlist, min)

	switch n.Op {
	case OCALLFUNC, OCALLMETH:
		fn := asNode(n.Left.Type.Nname())
		if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
			m := v.visit(fn.Name.Defn)
			if m < min {
				min = m
			}
		}

	case OCLOSURE:
		m := v.visit(n.Func.Closure)
		if m < min {
			min = m
		}
	}

	return min
}

// Escape analysis.

// An escape analysis pass for a set of functions.
// The analysis assumes that closures and the functions in which they
// appear are analyzed together, so that the aliasing between their
// variables can be modeled more precisely.
//
// First escfunc, esc and escassign recurse over the ast of each
// function to dig out flow(dst,src) edges between any
// pointer-containing nodes and store them in e.nodeEscState(dst).Flowsrc. For
// variables assigned to a variable in an outer scope or used as a
// return value, they store a flow(theSink, src) edge to a fake node
// 'the Sink'. For variables referenced in closures, an edge
// flow(closure, &var) is recorded and the flow of a closure itself to
// an outer scope is tracked the same way as other variables.
//
// Then escflood walks the graph starting at theSink and tags all
// variables it can reach through an & node as escaping and all function
// parameters it can reach as leaking.
//
// If a value's address is taken but the address does not escape,
// then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals.

func escapes(all []*Node) {
	visitBottomUp(all, escAnalyze)
}

const (
	EscFuncUnknown = 0 + iota
	EscFuncPlanned
	EscFuncStarted
	EscFuncTagged
)

// There appear to be some loops in the escape graph, causing
// arbitrary recursion into deeper and deeper levels.
// Cut this off safely by making minLevel sticky: once you
// get that deep, you cannot go down any further but you also
// cannot go up any further. This is a conservative fix.
// Making minLevel smaller (more negative) would handle more
// complex chains of indirections followed by address-of operations,
// at the cost of repeating the traversal once for each additional
// allowed level when a loop is encountered. Using -2 suffices to
// pass all the tests we have written so far, which we assume matches
// the level of complexity we want the escape analysis code to handle.
const MinLevel = -2
// A Level encodes the reference state and context applied to
// (stack, heap) allocated memory.
//
// value is the overall sum of *(1) and &(-1) operations encountered
// along a path from a destination (sink, return value) to a source
// (allocation, parameter).
//
// suffixValue is the maximum-copy-started-suffix-level applied to a sink.
// For example:
// sink = x.left.left --> level=2, x is dereferenced twice and does not escape to sink.
// sink = &Node{x} --> level=-1, x is accessible from sink via one "address of"
// sink = &Node{&Node{x}} --> level=-2, x is accessible from sink via two "address of"
// sink = &Node{&Node{x.left}} --> level=-1, but x is NOT accessible from sink because it was indirected and then copied.
// (The copy operations are sometimes implicit in the source code; in this case,
// value of x.left was copied into a field of a newly allocated Node)
//
// There's one of these for each Node, and the integer values
// rarely exceed even what can be stored in 4 bits, never mind 8.
type Level struct {
	value, suffixValue int8
}

func (l Level) int() int {
	return int(l.value)
}

func levelFrom(i int) Level {
	if i <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: int8(i)}
}

func satInc8(x int8) int8 {
	if x == 127 {
		return 127
	}
	return x + 1
}

func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}

// inc returns the level l + 1, representing the effect of an indirect (*) operation.
func (l Level) inc() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: satInc8(l.value), suffixValue: satInc8(l.suffixValue)}
}

// dec returns the level l - 1, representing the effect of an address-of (&) operation.
func (l Level) dec() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
}

// copy returns the level for a copy of a value with level l.
func (l Level) copy() Level {
	return Level{value: l.value, suffixValue: max8(l.suffixValue, 0)}
}

func (l1 Level) min(l2 Level) Level {
	return Level{
		value:       min8(l1.value, l2.value),
		suffixValue: min8(l1.suffixValue, l2.suffixValue)}
}

// guaranteedDereference returns the number of dereferences
// applied to a pointer before addresses are taken/generated.
// This is the maximum level computed from path suffixes starting
// with copies where paths flow from destination to source.
func (l Level) guaranteedDereference() int {
	return int(l.suffixValue)
}
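// As a rough worked example of the arithmetic above (levels only; not part
// of the original file): walking from a sink back towards a source,
// levelFrom(0).dec() yields {value: -1, suffixValue: -1} for one address-of,
// a second dec() yields {-2, -2}, and any further dec() stays pinned at
// MinLevel because of the saturation check. Conversely, inc() for a
// dereference raises both fields, and copy() clamps a negative suffixValue
// back to 0, which is what records that a value was copied (for example,
// into a freshly allocated struct field) after being dereferenced.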
// An EscStep documents one step in the path from memory
// that is heap allocated to the (alleged) reason for the
// heap allocation.
type EscStep struct {
	src, dst *Node    // the endpoints of this edge in the escape-to-heap chain.
	where    *Node    // sometimes the endpoints don't match source locations; set 'where' to make that right
	parent   *EscStep // used in flood to record path
	why      string   // explanation for this step in the escape-to-heap chain
	busy     bool     // used in prevent to snip cycles.
}

type NodeEscState struct {
	Curfn             *Node
	Flowsrc           []EscStep // flow(this, src)
	Retval            Nodes     // on OCALLxxx, list of dummy return values
	Loopdepth         int32     // -1: global, 0: return variables, 1: function top level, increased inside function for every loop or label to mark scopes
	Level             Level
	Walkgen           uint32
	Maxextraloopdepth int32
}

func (e *EscState) nodeEscState(n *Node) *NodeEscState {
	if nE, ok := n.Opt().(*NodeEscState); ok {
		return nE
	}
	if n.Opt() != nil {
		Fatalf("nodeEscState: opt in use (%T)", n.Opt())
	}
	nE := &NodeEscState{
		Curfn: Curfn,
	}
	n.SetOpt(nE)
	e.opts = append(e.opts, n)
	return nE
}

func (e *EscState) track(n *Node) {
	if Curfn == nil {
		Fatalf("EscState.track: Curfn nil")
	}
	n.Esc = EscNone // until proven otherwise
	nE := e.nodeEscState(n)
	nE.Loopdepth = e.loopdepth
	e.noesc = append(e.noesc, n)
}

// Escape constants are numbered in order of increasing "escapiness"
// to help make inferences be monotonic. With the exception of
// EscNever which is sticky, eX < eY means that eY is more exposed
// than eX, and hence replaces it in a conservative analysis.
const (
	EscUnknown = iota
	EscNone    // Does not escape to heap, result, or parameters.
	EscReturn  // Is returned or reachable from returned.
	EscHeap    // Reachable from the heap
	EscNever   // By construction will not escape.

	EscBits           = 3
	EscMask           = (1 << EscBits) - 1
	EscContentEscapes = 1 << EscBits // value obtained by indirect of parameter escapes to heap
	EscReturnBits     = EscBits + 1
	// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
)

// escMax returns the maximum of an existing escape value
// (and its additional parameter flow flags) and a new escape type.
func escMax(e, etype uint16) uint16 {
	if e&EscMask >= EscHeap {
		// normalize
		if e&^EscMask != 0 {
			Fatalf("Escape information had unexpected return encoding bits (w/ EscHeap, EscNever), e&EscMask=%v", e&EscMask)
		}
	}
	if e&EscMask > etype {
		return e
	}
	if etype == EscNone || etype == EscReturn {
		return (e &^ EscMask) | etype
	}
	return etype
}

// For each input parameter to a function, the escapeReturnEncoding describes
// how the parameter may leak to the function's outputs. This is currently the
// "level" of the leak where level is 0 or larger (negative level means stored into
// something whose address is returned -- but that implies stored into the heap,
// hence EscHeap, which means that the details are not currently relevant.)
const (
	bitsPerOutputInTag = 3                                 // For each output, the number of bits for a tag
	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
	maxEncodedLevel    = int(bitsMaskForTag - 1)           // The largest level that can be stored in a tag.
)
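// As an illustrative decoding (a sketch, not from the original file): the
// value 0x12 has escEnum bits 010 (EscReturn), the EscContentEscapes bit
// clear, and 001 in the first 3-bit output tag, i.e. level 0+1, meaning the
// parameter flows directly to the function's first result and nowhere else.
// parsetag("esc:0x12") below returns that value, and describeEscape renders
// it as "EscReturn =".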
type EscState struct {
	// Fake node that all
	//   - return values and output variables
	//   - parameters on imported functions not marked 'safe'
	//   - assignments to global variables
	// flow to.
	theSink Node

	dsts      []*Node // all dst nodes
	loopdepth int32   // for detecting nested loop scopes
	pdepth    int     // for debug printing in recursions.
	dstcount  int     // diagnostic
	edgecount int     // diagnostic
	noesc     []*Node // list of possible non-escaping nodes, for printing
	recursive bool    // recursive function or group of mutually recursive functions.
	opts      []*Node // nodes with .Opt initialized
	walkgen   uint32
}

func newEscState(recursive bool) *EscState {
	e := new(EscState)
	e.theSink.Op = ONAME
	e.theSink.Orig = &e.theSink
	e.theSink.SetClass(PEXTERN)
	e.theSink.Sym = lookup(".sink")
	e.nodeEscState(&e.theSink).Loopdepth = -1
	e.recursive = recursive
	return e
}

func (e *EscState) stepWalk(dst, src *Node, why string, parent *EscStep) *EscStep {
	// TODO: keep a cache of these, mark entry/exit in escwalk to avoid allocation
	// Or perhaps never mind, since it is disabled unless printing is on.
	// We may want to revisit this, since the EscStep nodes would make
	// an excellent replacement for the poorly-separated graph-build/graph-flood
	// stages.
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, parent: parent}
}

func (e *EscState) stepAssign(step *EscStep, dst, src *Node, why string) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	if step != nil { // Caller may have known better.
		if step.why == "" {
			step.why = why
		}
		if step.dst == nil {
			step.dst = dst
		}
		if step.src == nil {
			step.src = src
		}
		return step
	}
	return &EscStep{src: src, dst: dst, why: why}
}

func (e *EscState) stepAssignWhere(dst, src *Node, why string, where *Node) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, where: where}
}

// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
	if fn == nil || fn.Func.Nname == nil {
		return nil
	}
	return fn.Func.Nname.Sym
}

// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
func (e *EscState) curfnSym(n *Node) *types.Sym {
	nE := e.nodeEscState(n)
	return funcSym(nE.Curfn)
}

func escAnalyze(all []*Node, recursive bool) {
	e := newEscState(recursive)

	for _, n := range all {
		if n.Op == ODCLFUNC {
			n.Esc = EscFuncPlanned
			if Debug['m'] > 3 {
				Dump("escAnalyze", n)
			}
		}
	}

	// flow-analyze functions
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.escfunc(n)
		}
	}

	// print("escapes: %d e.dsts, %d edges\n", e.dstcount, e.edgecount);

	// visit the upstream of each dst, mark address nodes with
	// addrescapes, mark parameters unsafe
	escapes := make([]uint16, len(e.dsts))
	for i, n := range e.dsts {
		escapes[i] = n.Esc
	}
	for _, n := range e.dsts {
		e.escflood(n)
	}
	for {
		done := true
		for i, n := range e.dsts {
			if n.Esc != escapes[i] {
				done = false
				if Debug['m'] > 2 {
					Warnl(n.Pos, "Reflooding %v %S", e.curfnSym(n), n)
				}
				escapes[i] = n.Esc
				e.escflood(n)
			}
		}
		if done {
			break
		}
	}

	// for all top level functions, tag the typenodes corresponding to the param nodes
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.esctag(n)
		}
	}

	if Debug['m'] != 0 {
		for _, n := range e.noesc {
			if n.Esc == EscNone {
				Warnl(n.Pos, "%v %S does not escape", e.curfnSym(n), n)
			}
		}
	}

	for _, x := range e.opts {
		x.SetOpt(nil)
	}
}

func (e *EscState) escfunc(fn *Node) {
	// print("escfunc %N %s\n", fn.Func.Nname, e.recursive?"(recursive)":"");
	if fn.Esc != EscFuncPlanned {
		Fatalf("repeat escfunc %v", fn.Func.Nname)
	}
	fn.Esc = EscFuncStarted

	saveld := e.loopdepth
	e.loopdepth = 1
	savefn := Curfn
	Curfn = fn

	for _, ln := range Curfn.Func.Dcl {
		if ln.Op != ONAME {
			continue
		}
		lnE := e.nodeEscState(ln)
		switch ln.Class() {
		// out params are in a loopdepth between the sink and all local variables
		case PPARAMOUT:
			lnE.Loopdepth = 0

		case PPARAM:
			lnE.Loopdepth = 1
			if ln.Type != nil && !types.Haspointers(ln.Type) {
				break
			}
			if Curfn.Nbody.Len() == 0 && !Curfn.Noescape() {
				ln.Esc = EscHeap
			} else {
				ln.Esc = EscNone // prime for escflood later
			}
			e.noesc = append(e.noesc, ln)
		}
	}

	// in a mutually recursive group we lose track of the return values
	if e.recursive {
		for _, ln := range Curfn.Func.Dcl {
			if ln.Op == ONAME && ln.Class() == PPARAMOUT {
				e.escflows(&e.theSink, ln, e.stepAssign(nil, ln, ln, "returned from recursive function"))
			}
		}
	}

	e.escloopdepthlist(Curfn.Nbody)
	e.esclist(Curfn.Nbody, Curfn)
	Curfn = savefn
	e.loopdepth = saveld
}
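// For illustration (a sketch, not from the original file; the names f and g
// are invented): in a mutually recursive pair such as
//
//	func f(p *int) *int { return g(p) }
//	func g(p *int) *int { return f(p) }
//
// both functions are analyzed in one recursive group, so their PPARAMOUT
// results are conservatively wired to theSink above. A parameter that
// reaches its result therefore also reaches the sink, and tends to be
// reported as a plain leaking param rather than getting a precise
// flow-to-result tag.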
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
	looping    Node
	nonlooping Node
)

func (e *EscState) escloopdepthlist(l Nodes) {
	for _, n := range l.Slice() {
		e.escloopdepth(n)
	}
}

func (e *EscState) escloopdepth(n *Node) {
	if n == nil {
		return
	}

	e.escloopdepthlist(n.Ninit)

	switch n.Op {
	case OLABEL:
		if n.Left == nil || n.Left.Sym == nil {
			Fatalf("esc:label without label: %+v", n)
		}

		// Walk will complain about this label being already defined, but that's not until
		// after escape analysis. In the future, maybe pull label & goto analysis out of walk and put before esc.
		// if(n.Left.Sym.Label != nil)
		//	fatal("escape analysis messed up analyzing label: %+N", n);
		n.Left.Sym.Label = asTypesNode(&nonlooping)

	case OGOTO:
		if n.Left == nil || n.Left.Sym == nil {
			Fatalf("esc:goto without label: %+v", n)
		}

		// If we come past one that's uninitialized, this must be a (harmless) forward jump
		// but if it's set to nonlooping the label must have preceded this goto.
		if asNode(n.Left.Sym.Label) == &nonlooping {
			n.Left.Sym.Label = asTypesNode(&looping)
		}
	}

	e.escloopdepth(n.Left)
	e.escloopdepth(n.Right)
	e.escloopdepthlist(n.List)
	e.escloopdepthlist(n.Nbody)
	e.escloopdepthlist(n.Rlist)
}

func (e *EscState) esclist(l Nodes, parent *Node) {
	for _, n := range l.Slice() {
		e.esc(n, parent)
	}
}

func (e *EscState) esc(n *Node, parent *Node) {
	if n == nil {
		return
	}

	lno := setlineno(n)

	// ninit logically runs at a different loopdepth than the rest of the for loop.
	e.esclist(n.Ninit, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth++
	}

	// type switch variables have no ODCL.
	// process type switch as declaration.
	// must happen before processing of switch body,
	// so before recursion.
	if n.Op == OSWITCH && n.Left != nil && n.Left.Op == OTYPESW {
		for _, cas := range n.List.Slice() { // cases
			// it.N().Rlist is the variable per case
			if cas.Rlist.Len() != 0 {
				e.nodeEscState(cas.Rlist.First()).Loopdepth = e.loopdepth
			}
		}
	}

	// Big stuff escapes unconditionally
	// "Big" conditions that were scattered around in walk have been gathered here
	if n.Esc != EscHeap && n.Type != nil &&
		(n.Type.Width > maxStackVarSize ||
			(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 ||
			n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
		if Debug['m'] > 2 {
			Warnl(n.Pos, "%v is too large for stack", n)
		}
		n.Esc = EscHeap
		addrescapes(n)
		e.escassignSinkWhy(n, n, "too large for stack") // TODO category: tooLarge
	}

	e.esc(n.Left, n)

	if n.Op == ORANGE {
		// ORANGE node's Right is evaluated before the loop
		e.loopdepth--
	}

	e.esc(n.Right, n)

	if n.Op == ORANGE {
		e.loopdepth++
	}

	e.esclist(n.Nbody, n)
	e.esclist(n.List, n)
	e.esclist(n.Rlist, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth--
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v esc: %v\n", linestr(lineno), e.loopdepth, funcSym(Curfn), n)
	}

	switch n.Op {
	// Record loop depth at declaration.
	case ODCL:
		if n.Left != nil {
			e.nodeEscState(n.Left).Loopdepth = e.loopdepth
		}

	case OLABEL:
		if asNode(n.Left.Sym.Label) == &nonlooping {
			if Debug['m'] > 2 {
				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
			}
		} else if asNode(n.Left.Sym.Label) == &looping {
			if Debug['m'] > 2 {
				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
			}
			e.loopdepth++
		}

		// See case OLABEL in escloopdepth above
		// else if(n.Left.Sym.Label == nil)
		//	fatal("escape analysis missed or messed up a label: %+N", n);

		n.Left.Sym.Label = nil

	case ORANGE:
		if n.List.Len() >= 2 {
			// Everything but fixed array is a dereference.

			// If fixed array is really the address of fixed array,
			// it is also a dereference, because it is implicitly
			// dereferenced (see #12588)
			if n.Type.IsArray() &&
				!(n.Right.Type.IsPtr() && eqtype(n.Right.Type.Elem(), n.Type)) {
				e.escassignWhyWhere(n.List.Second(), n.Right, "range", n)
			} else {
				e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n))
			}
		}

	case OSWITCH:
		if n.Left != nil && n.Left.Op == OTYPESW {
			for _, cas := range n.List.Slice() {
				// cases
				// n.Left.Right is the argument of the .(type),
				// it.N().Rlist is the variable per case
				if cas.Rlist.Len() != 0 {
					e.escassignWhyWhere(cas.Rlist.First(), n.Left.Right, "switch case", n)
				}
			}
		}

	// Filter out the following special case.
	//
	//	func (b *Buffer) Foo() {
	//		n, m := ...
	//		b.buf = b.buf[n:m]
	//	}
	//
	// This assignment is a no-op for escape analysis,
	// it does not store any new pointers into b that were not already there.
	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
	case OAS, OASOP:
		if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
			(n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
			(n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
			n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME

			// Here we also assume that the statement will not contain calls,
			// that is, that order will move any calls to init.
			// Otherwise base ONAME value could change between the moments
			// when we evaluate it for dst and for src.
			//
			// Note, this optimization does not apply to OSLICEARR,
			// because it does introduce a new pointer into b that was not already there
			// (pointer to b itself). After such assignment, if b contents escape,
			// b escapes as well. If we ignore such OSLICEARR, we will conclude
			// that b does not escape when b contents do.
			if Debug['m'] != 0 {
				Warnl(n.Pos, "%v ignoring self-assignment to %S", e.curfnSym(n), n.Left)
			}

			break
		}

		e.escassign(n.Left, n.Right, e.stepAssignWhere(nil, nil, "", n))

	case OAS2: // x,y = a,b
		if n.List.Len() == n.Rlist.Len() {
			rs := n.Rlist.Slice()
			for i, n := range n.List.Slice() {
				e.escassignWhyWhere(n, rs[i], "assign-pair", n)
			}
		}

	case OAS2RECV: // v, ok = <-ch
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-receive", n)
	case OAS2MAPR: // v, ok = m[k]
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-mapr", n)
	case OAS2DOTTYPE: // v, ok = x.(type)
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-dot-type", n)

	case OSEND: // ch <- x
		e.escassignSinkWhy(n, n.Right, "send")

	case ODEFER:
		if e.loopdepth == 1 { // top level
			break
		}
		// arguments leak out of scope
		// TODO: leak to a dummy node instead
		// defer f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "defer func")
		e.escassignSinkWhy(n, n.Left.Right, "defer func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "defer func arg")
		}

	case OPROC:
		// go f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "go func")
		e.escassignSinkWhy(n, n.Left.Right, "go func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "go func arg")
		}

	case OCALLMETH, OCALLFUNC, OCALLINTER:
		e.esccall(n, parent)

	// esccall already done on n.Rlist.First(). tie its Retval to n.List
	case OAS2FUNC: // x,y = f()
		rs := e.nodeEscState(n.Rlist.First()).Retval.Slice()
		for i, n := range n.List.Slice() {
			if i >= len(rs) {
				break
			}
			e.escassignWhyWhere(n, rs[i], "assign-pair-func-call", n)
		}
		if n.List.Len() != len(rs) {
			Fatalf("esc oas2func")
		}

	case ORETURN:
		retList := n.List
		if retList.Len() == 1 && Curfn.Type.NumResults() > 1 {
			// OAS2FUNC in disguise
			// esccall already done on n.List.First()
			// tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's
			retList = e.nodeEscState(n.List.First()).Retval
		}

		i := 0
		for _, lrn := range Curfn.Func.Dcl {
			if i >= retList.Len() {
				break
			}
			if lrn.Op != ONAME || lrn.Class() != PPARAMOUT {
				continue
			}
			e.escassignWhyWhere(lrn, retList.Index(i), "return", n)
			i++
		}

		if i < retList.Len() {
			Fatalf("esc return list")
		}

	// Argument could leak through recover.
	case OPANIC:
		e.escassignSinkWhy(n, n.Left, "panic")

	case OAPPEND:
		if !n.Isddd() {
			for _, nn := range n.List.Slice()[1:] {
				e.escassignSinkWhy(n, nn, "appended to slice") // lose track of assign to dereference
			}
		} else {
			// append(slice1, slice2...) -- slice2 itself does not escape, but contents do.
			slice2 := n.List.Second()
			e.escassignDereference(&e.theSink, slice2, e.stepAssignWhere(n, slice2, "appended slice...", n)) // lose track of assign of dereference
			if Debug['m'] > 3 {
				Warnl(n.Pos, "%v special treatment of append(slice1, slice2...) %S", e.curfnSym(n), n)
			}
%S", e.curfnSym(n), n) 889 } 890 } 891 e.escassignDereference(&e.theSink, n.List.First(), e.stepAssignWhere(n, n.List.First(), "appendee slice", n)) // The original elements are now leaked, too 892 893 case OCOPY: 894 e.escassignDereference(&e.theSink, n.Right, e.stepAssignWhere(n, n.Right, "copied slice", n)) // lose track of assign of dereference 895 896 case OCONV, OCONVNOP: 897 e.escassignWhyWhere(n, n.Left, "converted", n) 898 899 case OCONVIFACE: 900 e.track(n) 901 e.escassignWhyWhere(n, n.Left, "interface-converted", n) 902 903 case OARRAYLIT: 904 // Link values to array 905 for _, elt := range n.List.Slice() { 906 if elt.Op == OKEY { 907 elt = elt.Right 908 } 909 e.escassign(n, elt, e.stepAssignWhere(n, elt, "array literal element", n)) 910 } 911 912 case OSLICELIT: 913 // Slice is not leaked until proven otherwise 914 e.track(n) 915 // Link values to slice 916 for _, elt := range n.List.Slice() { 917 if elt.Op == OKEY { 918 elt = elt.Right 919 } 920 e.escassign(n, elt, e.stepAssignWhere(n, elt, "slice literal element", n)) 921 } 922 923 // Link values to struct. 924 case OSTRUCTLIT: 925 for _, elt := range n.List.Slice() { 926 e.escassignWhyWhere(n, elt.Left, "struct literal element", n) 927 } 928 929 case OPTRLIT: 930 e.track(n) 931 932 // Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too. 933 e.escassignWhyWhere(n, n.Left, "pointer literal [assign]", n) 934 935 case OCALLPART: 936 e.track(n) 937 938 // Contents make it to memory, lose track. 939 e.escassignSinkWhy(n, n.Left, "call part") 940 941 case OMAPLIT: 942 e.track(n) 943 // Keys and values make it to memory, lose track. 944 for _, elt := range n.List.Slice() { 945 e.escassignSinkWhy(n, elt.Left, "map literal key") 946 e.escassignSinkWhy(n, elt.Right, "map literal value") 947 } 948 949 case OCLOSURE: 950 // Link addresses of captured variables to closure. 951 for _, v := range n.Func.Cvars.Slice() { 952 if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs 953 continue 954 } 955 a := v.Name.Defn 956 if !v.Name.Byval() { 957 a = nod(OADDR, a, nil) 958 a.Pos = v.Pos 959 e.nodeEscState(a).Loopdepth = e.loopdepth 960 a = typecheck(a, Erv) 961 } 962 963 e.escassignWhyWhere(n, a, "captured by a closure", n) 964 } 965 fallthrough 966 967 case OMAKECHAN, 968 OMAKEMAP, 969 OMAKESLICE, 970 ONEW, 971 OARRAYRUNESTR, 972 OARRAYBYTESTR, 973 OSTRARRAYRUNE, 974 OSTRARRAYBYTE, 975 ORUNESTR: 976 e.track(n) 977 978 case OADDSTR: 979 e.track(n) 980 // Arguments of OADDSTR do not escape. 981 982 case OADDR: 983 // current loop depth is an upper bound on actual loop depth 984 // of addressed value. 985 e.track(n) 986 987 // for &x, use loop depth of x if known. 988 // it should always be known, but if not, be conservative 989 // and keep the current loop depth. 990 if n.Left.Op == ONAME { 991 switch n.Left.Class() { 992 case PAUTO: 993 nE := e.nodeEscState(n) 994 leftE := e.nodeEscState(n.Left) 995 if leftE.Loopdepth != 0 { 996 nE.Loopdepth = leftE.Loopdepth 997 } 998 999 // PPARAM is loop depth 1 always. 1000 // PPARAMOUT is loop depth 0 for writes 1001 // but considered loop depth 1 for address-of, 1002 // so that writing the address of one result 1003 // to another (or the same) result makes the 1004 // first result move to the heap. 
// escassignWhyWhere bundles a common case of
// escassign(e, dst, src, e.stepAssignWhere(dst, src, reason, where))
func (e *EscState) escassignWhyWhere(dst, src *Node, reason string, where *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, where)
	}
	e.escassign(dst, src, step)
}

// escassignSinkWhy bundles a common case of
// escassign(e, &e.theSink, src, e.stepAssign(nil, dst, src, reason))
func (e *EscState) escassignSinkWhy(dst, src *Node, reason string) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssign(nil, dst, src, reason)
	}
	e.escassign(&e.theSink, src, step)
}

// escassignSinkWhyWhere is escassignSinkWhy but includes a call site
// for accurate location reporting.
func (e *EscState) escassignSinkWhyWhere(dst, src *Node, reason string, call *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, call)
	}
	e.escassign(&e.theSink, src, step)
}

// Assert that expr somehow gets assigned to dst, if non nil. For
// dst==nil, any name node expr still must be marked as being
// evaluated in curfn. For expr==nil, dst must still be examined for
// evaluations inside it (e.g. *f(x) = y).
func (e *EscState) escassign(dst, src *Node, step *EscStep) {
	if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
		return
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v escassign: %S(%0j)[%v] = %S(%0j)[%v]\n",
			linestr(lineno), e.loopdepth, funcSym(Curfn),
			dst, dst, dst.Op,
			src, src, src.Op)
	}

	setlineno(dst)

	originalDst := dst
	dstwhy := "assigned"

	// Analyze lhs of assignment.
	// Replace dst with &e.theSink if we can't track it.
	switch dst.Op {
	default:
		Dump("dst", dst)
		Fatalf("escassign: unexpected dst")

	case OARRAYLIT,
		OSLICELIT,
		OCLOSURE,
		OCONV,
		OCONVIFACE,
		OCONVNOP,
		OMAPLIT,
		OSTRUCTLIT,
		OPTRLIT,
		ODDDARG,
		OCALLPART:

	case ONAME:
		if dst.Class() == PEXTERN {
			dstwhy = "assigned to top level variable"
			dst = &e.theSink
		}

	case ODOT: // treat "dst.x = src" as "dst = src"
		e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "dot-equals"))
		return

	case OINDEX:
		if dst.Left.Type.IsArray() {
			e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "array-element-equals"))
			return
		}

		dstwhy = "slice-element-equals"
		dst = &e.theSink // lose track of dereference

	case OIND:
		dstwhy = "star-equals"
		dst = &e.theSink // lose track of dereference

	case ODOTPTR:
		dstwhy = "star-dot-equals"
		dst = &e.theSink // lose track of dereference

	// lose track of key and value
	case OINDEXMAP:
		e.escassign(&e.theSink, dst.Right, e.stepAssign(nil, originalDst, src, "key of map put"))
		dstwhy = "value of map put"
		dst = &e.theSink
	}

	lno := setlineno(src)
	e.pdepth++

	switch src.Op {
	case OADDR, // dst = &x
		OIND,    // dst = *x
		ODOTPTR, // dst = (*x).f
		ONAME,
		ODDDARG,
		OPTRLIT,
		OARRAYLIT,
		OSLICELIT,
		OMAPLIT,
		OSTRUCTLIT,
		OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		OARRAYRUNESTR,
		OARRAYBYTESTR,
		OSTRARRAYRUNE,
		OSTRARRAYBYTE,
		OADDSTR,
		ONEW,
		OCALLPART,
		ORUNESTR,
		OCONVIFACE:
		e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))

	case OCLOSURE:
		// OCLOSURE is lowered to OPTRLIT,
		// insert OADDR to account for the additional indirection.
		a := nod(OADDR, src, nil)
		a.Pos = src.Pos
		e.nodeEscState(a).Loopdepth = e.nodeEscState(src).Loopdepth
		a.Type = types.NewPtr(src.Type)
		e.escflows(dst, a, e.stepAssign(nil, originalDst, src, dstwhy))

	// Flowing multiple returns to a single dst happens when
	// analyzing "go f(g())": here g() flows to sink (issue 4529).
	case OCALLMETH, OCALLFUNC, OCALLINTER:
		for _, n := range e.nodeEscState(src).Retval.Slice() {
			e.escflows(dst, n, e.stepAssign(nil, originalDst, n, dstwhy))
		}

	// A non-pointer escaping from a struct does not concern us.
	case ODOT:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		fallthrough

	// Conversions, field access, slice all preserve the input value.
	case OCONV,
		OCONVNOP,
		ODOTMETH,
		// treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
		OSLICE,
		OSLICE3,
		OSLICEARR,
		OSLICE3ARR,
		OSLICESTR:
		// Conversions, field access, slice all preserve the input value.
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case ODOTTYPE,
		ODOTTYPE2:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case OAPPEND:
		// Append returns first argument.
		// Subsequent arguments are already leaked because they are operands to append.
		e.escassign(dst, src.List.First(), e.stepAssign(step, dst, src.List.First(), dstwhy))

	case OINDEX:
		// Index of array preserves input value.
		if src.Left.Type.IsArray() {
			e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
		} else {
			e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))
		}

	// Might be pointer arithmetic, in which case
	// the operands flow into the result.
	// TODO(rsc): Decide what the story is here. This is unsettling.
	case OADD,
		OSUB,
		OOR,
		OXOR,
		OMUL,
		ODIV,
		OMOD,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		OPLUS,
		OMINUS,
		OCOM:
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

		e.escassign(dst, src.Right, e.stepAssign(step, originalDst, src, dstwhy))
	}

	e.pdepth--
	lineno = lno
}
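// As a small illustration of the dst rewriting above (a sketch; m, k and v
// are invented names): for a map assignment m[k] = v, dst is OINDEXMAP, so
// the key k is assigned straight to theSink ("key of map put") and dst is
// replaced by theSink before the src switch runs, with the reason
// "value of map put". Anything reachable from a map key or value is
// therefore treated as escaping.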
// Common case for escapes is 16 bits 000000000xxxEEEE
// where commonest cases for xxx encoding in-to-out pointer
// flow are 000, 001, 010, 011 and EEEE is computed Esc bits.
// Note width of xxx depends on value of constant
// bitsPerOutputInTag -- expect 2 or 3, so in practice the
// tag cache array is 64 or 128 long. Some entries will
// never be populated.
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string

// mktag returns the string representation for an escape analysis tag.
func mktag(mask int) string {
	switch mask & EscMask {
	case EscNone, EscReturn:
	default:
		Fatalf("escape mktag")
	}

	if mask < len(tags) && tags[mask] != "" {
		return tags[mask]
	}

	s := fmt.Sprintf("esc:0x%x", mask)
	if mask < len(tags) {
		tags[mask] = s
	}
	return s
}

// parsetag decodes an escape analysis tag and returns the esc value.
func parsetag(note string) uint16 {
	if !strings.HasPrefix(note, "esc:") {
		return EscUnknown
	}
	n, _ := strconv.ParseInt(note[4:], 0, 0)
	em := uint16(n)
	if em == 0 {
		return EscNone
	}
	return em
}

// describeEscape returns a string describing the escape tag.
// The result is either one of {EscUnknown, EscNone, EscHeap}, which all have no further annotation,
// or a description of parameter flow, which takes the form of an optional "contentToHeap"
// indicating that the content of this parameter is leaked to the heap, followed by a sequence
// of level encodings separated by spaces, one for each output, where _ means no flow,
// = means direct flow, and N asterisks (*) encodes content (obtained by indirection) flow.
// e.g., "contentToHeap _ =" means that a parameter's content (one or more dereferences)
// escapes to the heap, the parameter does not leak to the first output, but does leak directly
// to the second output (and if there are more than two outputs, there is no flow to those.)
func describeEscape(em uint16) string {
	var s string
	switch em & EscMask {
	case EscUnknown:
		s = "EscUnknown"
	case EscNone:
		s = "EscNone"
	case EscHeap:
		s = "EscHeap"
	case EscReturn:
		s = "EscReturn"
	}
	if em&EscContentEscapes != 0 {
		if s != "" {
			s += " "
		}
		s += "contentToHeap"
	}
	for em >>= EscReturnBits; em != 0; em = em >> bitsPerOutputInTag {
		// See encoding description above
		if s != "" {
			s += " "
		}
		switch embits := em & bitsMaskForTag; embits {
		case 0:
			s += "_"
		case 1:
			s += "="
		default:
			for i := uint16(0); i < embits-1; i++ {
				s += "*"
			}
		}
	}
	return s
}

// escassignfromtag models the input-to-output assignment flow of one of a function
// call's arguments, where the flow is encoded in "note".
func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) uint16 {
	em := parsetag(note)
	if src.Op == OLITERAL {
		return em
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::assignfromtag:: src=%S, em=%s\n",
			linestr(lineno), src, describeEscape(em))
	}

	if em == EscUnknown {
		e.escassignSinkWhyWhere(src, src, "passed to call[argument escapes]", call)
		return em
	}

	if em == EscNone {
		return em
	}

	// If content inside parameter (reached via indirection)
	// escapes to heap, mark as such.
	if em&EscContentEscapes != 0 {
		e.escassign(&e.theSink, e.addDereference(src), e.stepAssignWhere(src, src, "passed to call[argument content escapes]", call))
	}

	em0 := em
	dstsi := 0
	for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em = em >> bitsPerOutputInTag {
		// Prefer the lowest-level path to the reference (for escape purposes).
		// Two-bit encoding (for example; 1, 3, and 4 bits are other options)
		//  01 = 0-level
		//  10 = 1-level, (content escapes),
		//  11 = 2-level, (content of content escapes),
		embits := em & bitsMaskForTag
		if embits > 0 {
			n := src
			for i := uint16(0); i < embits-1; i++ {
				n = e.addDereference(n) // encode level>0 as indirections
			}
			e.escassign(dsts.Index(dstsi), n, e.stepAssignWhere(dsts.Index(dstsi), src, "passed-to-and-returned-from-call", call))
		}
		dstsi++
	}
	// If there are too many outputs to fit in the tag,
	// that is handled at the encoding end as EscHeap,
	// so there is no need to check here.

	if em != 0 && dstsi >= dsts.Len() {
		Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
	}
	return em0
}

func (e *EscState) escassignDereference(dst *Node, src *Node, step *EscStep) {
	if src.Op == OLITERAL {
		return
	}
	e.escassign(dst, e.addDereference(src), step)
}

// addDereference constructs a suitable OIND note applied to src.
// Because this is for purposes of escape accounting, not execution,
// some semantically dubious node combinations are (currently) possible.
func (e *EscState) addDereference(n *Node) *Node {
	ind := nod(OIND, n, nil)
	e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
	ind.Pos = n.Pos
	t := n.Type
	if t.IsKind(types.Tptr) {
		// This should model our own sloppy use of OIND to encode
		// decreasing levels of indirection; i.e., "indirecting" an array
		// might yield the type of an element. To be enhanced...
		t = t.Elem()
	}
	ind.Type = t
	return ind
}

// escNoteOutputParamFlow encodes maxEncodedLevel/.../1/0-level flow to the vargen'th parameter.
// Levels greater than maxEncodedLevel are replaced with maxEncodedLevel.
// If the encoding cannot describe the modified input level and output number, then EscHeap is returned.
func escNoteOutputParamFlow(e uint16, vargen int32, level Level) uint16 {
	// Flow+level is encoded in two bits.
	// 00 = not flow, xx = level+1 for 0 <= level <= maxEncodedLevel
	// 16 bits for Esc allows 6x2bits or 4x3bits or 3x4bits if additional information would be useful.
	if level.int() <= 0 && level.guaranteedDereference() > 0 {
		return escMax(e|EscContentEscapes, EscNone) // At least one deref, thus only content.
	}
	if level.int() < 0 {
		return EscHeap
	}
	if level.int() > maxEncodedLevel {
		// Cannot encode larger values than maxEncodedLevel.
		level = levelFrom(maxEncodedLevel)
	}
	encoded := uint16(level.int() + 1)

	shift := uint(bitsPerOutputInTag*(vargen-1) + EscReturnBits)
	old := (e >> shift) & bitsMaskForTag
	if old == 0 || encoded != 0 && encoded < old {
		old = encoded
	}

	encodedFlow := old << shift
	if (encodedFlow>>shift)&bitsMaskForTag != old {
		// Encoding failure defaults to heap.
		return EscHeap
	}

	return (e &^ (bitsMaskForTag << shift)) | encodedFlow
}
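// Worked example for the encoder above (a sketch, consistent with the 0x12
// decoding shown earlier): escNoteOutputParamFlow(EscReturn, 1, levelFrom(0))
// computes shift = EscReturnBits = 4, encoded = 1, and returns
// EscReturn | 1<<4 = 0x12, i.e. a direct (level 0) flow of the parameter to
// the first result.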
func (e *EscState) initEscRetval(call *Node, fntype *types.Type) {
	cE := e.nodeEscState(call)
	cE.Retval.Set(nil) // Suspect this is not nil for indirect calls.
	for i, f := range fntype.Results().Fields().Slice() {
		buf := fmt.Sprintf(".out%d", i)
		ret := newname(lookup(buf))
		ret.SetAddable(false) // TODO(mdempsky): Seems suspicious.
		ret.Type = f.Type
		ret.SetClass(PAUTO)
		ret.Name.Curfn = Curfn
		e.nodeEscState(ret).Loopdepth = e.loopdepth
		ret.Name.SetUsed(true)
		ret.Pos = call.Pos
		cE.Retval.Append(ret)
	}
}

// This is a bit messier than fortunate, pulled out of esc's big
// switch for clarity. We either have the paramnodes, which may be
// connected to other things through flows or we have the parameter type
// nodes, which may be marked "noescape". Navigating the ast is slightly
// different for methods vs plain functions and for imported vs
// this-package
func (e *EscState) esccall(call *Node, parent *Node) {
	var fntype *types.Type
	var indirect bool
	var fn *Node
	switch call.Op {
	default:
		Fatalf("esccall")

	case OCALLFUNC:
		fn = call.Left
		fntype = fn.Type
		indirect = fn.Op != ONAME || fn.Class() != PFUNC

	case OCALLMETH:
		fn = asNode(call.Left.Sym.Def)
		if fn != nil {
			fntype = fn.Type
		} else {
			fntype = call.Left.Type
		}

	case OCALLINTER:
		fntype = call.Left.Type
		indirect = true
	}

	argList := call.List
	if argList.Len() == 1 {
		arg := argList.First()
		if arg.Type.IsFuncArgStruct() { // f(g())
			argList = e.nodeEscState(arg).Retval
		}
	}

	args := argList.Slice()

	if indirect {
		// We know nothing!
		// Leak all the parameters
		for _, arg := range args {
			e.escassignSinkWhy(call, arg, "parameter to indirect call")
			if Debug['m'] > 3 {
				fmt.Printf("%v::esccall:: indirect call <- %S, untracked\n", linestr(lineno), arg)
			}
		}
		// Set up bogus outputs
		e.initEscRetval(call, fntype)
		// If there is a receiver, it also leaks to heap.
		if call.Op != OCALLFUNC {
			rf := fntype.Recv()
			r := call.Left.Left
			if types.Haspointers(rf.Type) {
				e.escassignSinkWhy(call, r, "receiver in indirect call")
			}
		} else { // indirect and OCALLFUNC = could be captured variables, too. (#14409)
			rets := e.nodeEscState(call).Retval.Slice()
			for _, ret := range rets {
				e.escassignDereference(ret, fn, e.stepAssignWhere(ret, fn, "captured by called closure", call))
			}
		}
		return
	}

	cE := e.nodeEscState(call)
	if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC &&
		fn.Name.Defn != nil && fn.Name.Defn.Nbody.Len() != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged {
		if Debug['m'] > 3 {
			fmt.Printf("%v::esccall:: %S in recursive group\n", linestr(lineno), call)
		}

		// function in same mutually recursive group. Incorporate into flow graph.
		//	print("esc local fn: %N\n", fn.Func.Ntype);
		if fn.Name.Defn.Esc == EscFuncUnknown || cE.Retval.Len() != 0 {
			Fatalf("graph inconsistency")
		}

		sawRcvr := false
		for _, n := range fn.Name.Defn.Func.Dcl {
			switch n.Class() {
			case PPARAM:
				if call.Op != OCALLFUNC && !sawRcvr {
					e.escassignWhyWhere(n, call.Left.Left, "call receiver", call)
					sawRcvr = true
					continue
				}
				if len(args) == 0 {
					continue
				}
				arg := args[0]
				if n.Isddd() && !call.Isddd() {
					// Introduce ODDDARG node to represent ... allocation.
					arg = nod(ODDDARG, nil, nil)
					arr := types.NewArray(n.Type.Elem(), int64(len(args)))
					arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
					arg.Pos = call.Pos
					e.track(arg)
					call.Right = arg
				}
				e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help.
				if arg == args[0] {
					args = args[1:]
					continue
				}
				// "..." arguments are untracked
				for _, a := range args {
					if Debug['m'] > 3 {
						fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
					}
					e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
				}
				// No more PPARAM processing, but keep
				// going for PPARAMOUT.
				args = nil

			case PPARAMOUT:
				cE.Retval.Append(n)
			}
		}

		return
	}

	// Imported or completely analyzed function. Use the escape tags.
	if cE.Retval.Len() != 0 {
		Fatalf("esc already decorated call %+v\n", call)
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::esccall:: %S not recursive\n", linestr(lineno), call)
	}

	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
	e.initEscRetval(call, fntype)

	// print("esc analyzed fn: %#N (%+T) returning (%+H)\n", fn, fntype, e.nodeEscState(call).Retval);

	// Receiver.
	if call.Op != OCALLFUNC {
		rf := fntype.Recv()
		r := call.Left.Left
		if types.Haspointers(rf.Type) {
			e.escassignfromtag(rf.Note, cE.Retval, r, call)
		}
	}

	for i, param := range fntype.Params().FieldSlice() {
		note := param.Note
		var arg *Node
		if param.Isddd() && !call.Isddd() {
			rest := args[i:]
			if len(rest) == 0 {
				break
			}

			// Introduce ODDDARG node to represent ... allocation.
			arg = nod(ODDDARG, nil, nil)
			arg.Pos = call.Pos
			arr := types.NewArray(param.Type.Elem(), int64(len(rest)))
			arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
			e.track(arg)
			call.Right = arg

			// Store arguments into slice for ... arg.
			for _, a := range rest {
				if Debug['m'] > 3 {
					fmt.Printf("%v::esccall:: ... <- %S\n", linestr(lineno), a)
				}
				if note == uintptrEscapesTag {
					e.escassignSinkWhyWhere(arg, a, "arg to uintptrescapes ...", call)
				} else {
					e.escassignWhyWhere(arg, a, "arg to ...", call)
				}
			}
		} else {
			arg = args[i]
			if note == uintptrEscapesTag {
				e.escassignSinkWhy(arg, arg, "escaping uintptr")
			}
		}

		if types.Haspointers(param.Type) && e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OPROC {
			a := arg
			for a.Op == OCONVNOP {
				a = a.Left
			}
			switch a.Op {
			// The callee has already been analyzed, so its arguments have esc tags.
			// The argument is marked as not escaping at all.
			// Record that fact so that any temporary used for
			// synthesizing this expression can be reclaimed when
			// the function returns.
			// This 'noescape' is even stronger than the usual esc == EscNone.
			// arg.Esc == EscNone means that arg does not escape the current function.
			// arg.SetNoescape(true) here means that arg does not escape this statement
			// in the current function.
			case OCALLPART, OCLOSURE, ODDDARG, OARRAYLIT, OSLICELIT, OPTRLIT, OSTRUCTLIT:
				a.SetNoescape(true)
			}
		}
	}
}
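// To illustrate the ODDDARG handling above (a sketch; f, p and q are
// invented names): for a call f(p, q) to func f(xs ...*int), esccall creates
// an ODDDARG node standing for the backing array of the ... slice, p and q
// are assigned into it ("arg to ..."), and whether that backing array can
// live on the caller's stack depends on whether the ODDDARG node itself is
// later found to leak.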
// escflows records the link src->dst in dst, throwing out some quick wins,
// and also ensuring that dst is noted as a flow destination.
func (e *EscState) escflows(dst, src *Node, why *EscStep) {
	if dst == nil || src == nil || dst == src {
		return
	}

	// Don't bother building a graph for scalars.
	if src.Type != nil && !types.Haspointers(src.Type) && !isReflectHeaderDataField(src) {
		if Debug['m'] > 3 {
			fmt.Printf("%v::NOT flows:: %S <- %S\n", linestr(lineno), dst, src)
		}
		return
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::flows:: %S <- %S\n", linestr(lineno), dst, src)
	}

	dstE := e.nodeEscState(dst)
	if len(dstE.Flowsrc) == 0 {
		e.dsts = append(e.dsts, dst)
		e.dstcount++
	}

	e.edgecount++

	if why == nil {
		dstE.Flowsrc = append(dstE.Flowsrc, EscStep{src: src})
	} else {
		starwhy := *why
		starwhy.src = src // TODO: need to reconcile this w/ needs of explanations.
		dstE.Flowsrc = append(dstE.Flowsrc, starwhy)
	}
}
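// A small end-to-end illustration of the flood that follows (a sketch;
// id and p are invented names):
//
//	func id(p *int) *int { return p }
//
// The return statement adds a flow edge from the result to p; flooding from
// the result reaches p at level 0, so escwalk below tags p as flowing to the
// result (reported under -m as something like
// "leaking param: p to result ~r1 level=0") rather than forcing it to the
// heap.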
// Whenever we hit a reference node, the level goes up by one, and whenever
// we hit an OADDR, the level goes down by one. As long as we're on a level > 0,
// finding an OADDR just means we're following the upstream of a dereference,
// so this address doesn't leak (yet).
// If level == 0, it means the /value/ of this node can reach the root of this flood,
// so if this node is an OADDR, its argument should be marked as escaping iff
// its currfn/e.loopdepth are different from the flood's root.
// Once an object has been moved to the heap, all of its upstream should be considered
// escaping to the global scope.
func (e *EscState) escflood(dst *Node) {
	switch dst.Op {
	case ONAME, OCLOSURE:
	default:
		return
	}

	dstE := e.nodeEscState(dst)
	if Debug['m'] > 2 {
		fmt.Printf("\nescflood:%d: dst %S scope:%v[%d]\n", e.walkgen, dst, e.curfnSym(dst), dstE.Loopdepth)
	}

	for i := range dstE.Flowsrc {
		e.walkgen++
		s := &dstE.Flowsrc[i]
		s.parent = nil
		e.escwalk(levelFrom(0), dst, s.src, s)
	}
}

// funcOutputAndInput reports whether dst and src correspond to output and input parameters of the same function.
func funcOutputAndInput(dst, src *Node) bool {
	// Note if dst is marked as escaping, then "returned" is too weak.
	return dst.Op == ONAME && dst.Class() == PPARAMOUT &&
		src.Op == ONAME && src.Class() == PPARAM && src.Name.Curfn == dst.Name.Curfn
}

func (es *EscStep) describe(src *Node) {
	if Debug['m'] < 2 {
		return
	}
	step0 := es
	for step := step0; step != nil && !step.busy; step = step.parent {
		// TODO: We get cycles. Trigger is i = &i (where var i interface{})
		step.busy = true
		// The trail is a little odd because of how the
		// graph is constructed. The link to the current
		// Node is parent.src unless parent is nil in which
		// case it is step.dst.
		nextDest := step.parent
		dst := step.dst
		where := step.where
		if nextDest != nil {
			dst = nextDest.src
		}
		if where == nil {
			where = dst
		}
		Warnl(src.Pos, "\tfrom %v (%s) at %s", dst, step.why, where.Line())
	}
	for step := step0; step != nil && step.busy; step = step.parent {
		step.busy = false
	}
}

const NOTALOOPDEPTH = -1

func (e *EscState) escwalk(level Level, dst *Node, src *Node, step *EscStep) {
	e.escwalkBody(level, dst, src, step, NOTALOOPDEPTH)
}

func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) {
	if src.Op == OLITERAL {
		return
	}
	srcE := e.nodeEscState(src)
	if srcE.Walkgen == e.walkgen {
		// Esclevels are vectors, do not compare as integers,
		// and must use "min" of old and new to guarantee
		// convergence.
		level = level.min(srcE.Level)
		if level == srcE.Level {
			// Have we been here already with an extraloopdepth,
			// or is the extraloopdepth provided no improvement on
			// what's already been seen?
			if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth {
				return
			}
			srcE.Maxextraloopdepth = extraloopdepth
		}
	} else { // srcE.Walkgen < e.walkgen -- first time, reset this.

func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) {
	if src.Op == OLITERAL {
		return
	}
	srcE := e.nodeEscState(src)
	if srcE.Walkgen == e.walkgen {
		// Esclevels are vectors, do not compare as integers,
		// and must use "min" of old and new to guarantee
		// convergence.
		level = level.min(srcE.Level)
		if level == srcE.Level {
			// Have we been here already with an extraloopdepth,
			// or is the extraloopdepth provided no improvement on
			// what's already been seen?
			if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth {
				return
			}
			srcE.Maxextraloopdepth = extraloopdepth
		}
	} else { // srcE.Walkgen < e.walkgen -- first time, reset this.
		srcE.Maxextraloopdepth = NOTALOOPDEPTH
	}

	srcE.Walkgen = e.walkgen
	srcE.Level = level
	modSrcLoopdepth := srcE.Loopdepth

	if extraloopdepth > modSrcLoopdepth {
		modSrcLoopdepth = extraloopdepth
	}

	if Debug['m'] > 2 {
		fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %S(%0j) scope:%v[%d] extraloopdepth=%v\n",
			level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", src.Op, src, src, e.curfnSym(src), srcE.Loopdepth, extraloopdepth)
	}

	e.pdepth++

	// Input parameter flowing to output parameter?
	var leaks bool
	var osrcesc uint16 // used to prevent duplicate error messages

	dstE := e.nodeEscState(dst)
	if funcOutputAndInput(dst, src) && src.Esc&EscMask < EscHeap && dst.Esc != EscHeap {
		// This case handles:
		// 1. return in
		// 2. return &in
		// 3. tmp := in; return &tmp
		// 4. return *in
		if Debug['m'] != 0 {
			if Debug['m'] <= 2 {
				Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level.int())
				step.describe(src)
			} else {
				Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level)
			}
		}
		if src.Esc&EscMask != EscReturn {
			src.Esc = EscReturn | src.Esc&EscContentEscapes
		}
		src.Esc = escNoteOutputParamFlow(src.Esc, dst.Name.Vargen, level)
		goto recurse
	}

	// If parameter content escapes to heap, set EscContentEscapes
	// Note minor confusion around escape from pointer-to-struct vs escape from struct
	if dst.Esc == EscHeap &&
		src.Op == ONAME && src.Class() == PPARAM && src.Esc&EscMask < EscHeap &&
		level.int() > 0 {
		src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
		if Debug['m'] != 0 {
			Warnl(src.Pos, "mark escaped content: %S", src)
			step.describe(src)
		}
	}

	leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Loopdepth < modSrcLoopdepth
	leaks = leaks || level.int() <= 0 && dst.Esc&EscMask == EscHeap

	osrcesc = src.Esc
	switch src.Op {
	case ONAME:
		if src.Class() == PPARAM && (leaks || dstE.Loopdepth < 0) && src.Esc&EscMask < EscHeap {
			if level.guaranteedDereference() > 0 {
				src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
				if Debug['m'] != 0 {
					if Debug['m'] <= 2 {
						if osrcesc != src.Esc {
							Warnl(src.Pos, "leaking param content: %S", src)
							step.describe(src)
						}
					} else {
						Warnl(src.Pos, "leaking param content: %S level=%v dst.eld=%v src.eld=%v dst=%S",
							src, level, dstE.Loopdepth, modSrcLoopdepth, dst)
					}
				}
			} else {
				src.Esc = EscHeap
				if Debug['m'] != 0 {
					if Debug['m'] <= 2 {
						Warnl(src.Pos, "leaking param: %S", src)
						step.describe(src)
					} else {
						Warnl(src.Pos, "leaking param: %S level=%v dst.eld=%v src.eld=%v dst=%S",
							src, level, dstE.Loopdepth, modSrcLoopdepth, dst)
					}
				}
			}
		}
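
		// For illustration of the two branches above (hypothetical user code,
		// not from this file; messages approximate and version-dependent):
		// "leaking param" means the parameter itself escapes, while
		// "leaking param content" means only what it points at escapes.
		//
		//	var sink interface{}
		//
		//	func leakParam(p *int)     { sink = p }   // leaking param: p
		//	func leakContent(pp **int) { sink = *pp } // leaking param content: pp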

		// Treat a captured closure variable as equivalent to the
		// original variable.
		if src.IsClosureVar() {
			if leaks && Debug['m'] != 0 {
				Warnl(src.Pos, "leaking closure reference %S", src)
				step.describe(src)
			}
			e.escwalk(level, dst, src.Name.Defn, e.stepWalk(dst, src.Name.Defn, "closure-var", step))
		}

	case OPTRLIT, OADDR:
		why := "pointer literal"
		if src.Op == OADDR {
			why = "address-of"
		}
		if leaks {
			src.Esc = EscHeap
			if Debug['m'] != 0 && osrcesc != src.Esc {
				p := src
				if p.Left.Op == OCLOSURE {
					p = p.Left // merely to satisfy error messages in tests
				}
				if Debug['m'] > 2 {
					Warnl(src.Pos, "%S escapes to heap, level=%v, dst=%v dst.eld=%v, src.eld=%v",
						p, level, dst, dstE.Loopdepth, modSrcLoopdepth)
				} else {
					Warnl(src.Pos, "%S escapes to heap", p)
					step.describe(src)
				}
			}
			addrescapes(src.Left)
			e.escwalkBody(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step), modSrcLoopdepth)
			extraloopdepth = modSrcLoopdepth // passes to recursive case, seems likely a no-op
		} else {
			e.escwalk(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step))
		}

	case OAPPEND:
		e.escwalk(level, dst, src.List.First(), e.stepWalk(dst, src.List.First(), "append-first-arg", step))

	case ODDDARG:
		if leaks {
			src.Esc = EscHeap
			if Debug['m'] != 0 && osrcesc != src.Esc {
				Warnl(src.Pos, "%S escapes to heap", src)
				step.describe(src)
			}
			extraloopdepth = modSrcLoopdepth
		}
		// similar to a slice arraylit and its args.
		level = level.dec()

	case OSLICELIT:
		for _, elt := range src.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.escwalk(level.dec(), dst, elt, e.stepWalk(dst, elt, "slice-literal-element", step))
		}

		fallthrough

	case OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		OARRAYRUNESTR,
		OARRAYBYTESTR,
		OSTRARRAYRUNE,
		OSTRARRAYBYTE,
		OADDSTR,
		OMAPLIT,
		ONEW,
		OCLOSURE,
		OCALLPART,
		ORUNESTR,
		OCONVIFACE:
		if leaks {
			src.Esc = EscHeap
			if Debug['m'] != 0 && osrcesc != src.Esc {
				Warnl(src.Pos, "%S escapes to heap", src)
				step.describe(src)
			}
			extraloopdepth = modSrcLoopdepth
		}

	case ODOT,
		ODOTTYPE:
		e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "dot", step))

	case
		OSLICE,
		OSLICEARR,
		OSLICE3,
		OSLICE3ARR,
		OSLICESTR:
		e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "slice", step))

	case OINDEX:
		if src.Left.Type.IsArray() {
			e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "fixed-array-index-of", step))
			break
		}
		fallthrough

	case ODOTPTR:
		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "dot of pointer", step))
	case OINDEXMAP:
		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "map index", step))
	case OIND:
		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "indirection", step))
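
	// For illustration of the leaks decision used in the cases above
	// (hypothetical user code, not from this file; -m wording varies
	// between releases): an allocation or address-of that reaches a heap
	// destination is forced to the heap, otherwise it may stay on the stack.
	//
	//	var sink *int
	//
	//	func stays() int {
	//		p := new(int) // new(int) does not escape; may be stack allocated
	//		return *p
	//	}
	//
	//	func escapes() {
	//		x := 0
	//		sink = &x // &x reaches a heap destination: x is moved to the heap
	//	}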

	// In this case a link went directly to a call, but should really go
	// to the dummy .outN outputs that were created for the call that
	// themselves link to the inputs with levels adjusted.
	// See e.g. #10466
	// This can only happen with functions returning a single result.
	case OCALLMETH, OCALLFUNC, OCALLINTER:
		if srcE.Retval.Len() != 0 {
			if Debug['m'] > 2 {
				fmt.Printf("%v:[%d] dst %S escwalk replace src: %S with %S\n",
					linestr(lineno), e.loopdepth,
					dst, src, srcE.Retval.First())
			}
			src = srcE.Retval.First()
			srcE = e.nodeEscState(src)
		}
	}

recurse:
	level = level.copy()

	for i := range srcE.Flowsrc {
		s := &srcE.Flowsrc[i]
		s.parent = step
		e.escwalkBody(level, dst, s.src, s, extraloopdepth)
		s.parent = nil
	}

	e.pdepth--
}

// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *Node) {
	switch n.Op {
	default:
		// Unexpected Op, probably due to a previous type error. Ignore.

	case OIND, ODOTPTR:
		// Nothing to do.

	case ONAME:
		if n == nodfp {
			break
		}

		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
		// on PPARAM it means something different.
		if n.Class() == PAUTO && n.Esc == EscNever {
			break
		}

		// If a closure reference escapes, mark the outer variable as escaping.
		if n.IsClosureVar() {
			addrescapes(n.Name.Defn)
			break
		}

		if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
			break
		}

		// This is a plain parameter or local variable that needs to move to the heap,
		// but possibly for the function outside the one we're compiling.
		// That is, if we have:
		//
		//	func f(x int) {
		//		func() {
		//			global = &x
		//		}
		//	}
		//
		// then we're analyzing the inner closure but we need to move x to the
		// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
		oldfn := Curfn
		Curfn = n.Name.Curfn
		if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
			Curfn = Curfn.Func.Closure
		}
		ln := lineno
		lineno = Curfn.Pos
		moveToHeap(n)
		Curfn = oldfn
		lineno = ln

	// ODOTPTR has already been introduced,
	// so these are the non-pointer ODOT and OINDEX.
	// In &x[0], if x is a slice, then x does not
	// escape--the pointer inside x does, but that
	// is always a heap pointer anyway.
	case ODOT, OINDEX, OPAREN, OCONVNOP:
		if !n.Left.Type.IsSlice() {
			addrescapes(n.Left)
		}
	}
}
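
// For illustration of addrescapes (hypothetical user code, not from this file;
// diagnostics approximate and version-dependent): a named variable whose address
// escapes is handed to moveToHeap, while a slice indexed in &s[0] is not, per the
// ODOT/OINDEX case above.
//
//	var global *int
//
//	func f() {
//		x := 0
//		global = &x // addrescapes(x) -> moveToHeap(x): "moved to heap: x"
//	}
//
//	func g(s []int) *int {
//		return &s[0] // s is a slice, so s itself is not moved to the heap
//	}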

// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
	if Debug['r'] != 0 {
		Dump("MOVE", n)
	}
	if compiling_runtime {
		yyerror("%v escapes to heap, not allowed in runtime.", n)
	}
	if n.Class() == PAUTOHEAP {
		Dump("n", n)
		Fatalf("double move to heap")
	}

	// Allocate a local stack variable to hold the pointer to the heap copy.
	// temp will add it to the function declaration list automatically.
	heapaddr := temp(types.NewPtr(n.Type))
	heapaddr.Sym = lookup("&" + n.Sym.Name)
	heapaddr.Orig.Sym = heapaddr.Sym
	heapaddr.Pos = n.Pos

	// Unset AutoTemp to persist the &foo variable name through SSA to
	// liveness analysis.
	// TODO(mdempsky/drchase): Cleaner solution?
	heapaddr.Name.SetAutoTemp(false)

	// Parameters have a local stack copy used at function start/end
	// in addition to the copy in the heap that may live longer than
	// the function.
	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
		if n.Xoffset == BADWIDTH {
			Fatalf("addrescapes before param assignment")
		}

		// We rewrite n below to be a heap variable (indirection of heapaddr).
		// Preserve a copy so we can still write code referring to the original,
		// and substitute that copy into the function declaration list
		// so that analyses of the local (on-stack) variables use it.
		stackcopy := newname(n.Sym)
		stackcopy.SetAddable(false)
		stackcopy.Type = n.Type
		stackcopy.Xoffset = n.Xoffset
		stackcopy.SetClass(n.Class())
		stackcopy.Name.Param.Heapaddr = heapaddr
		if n.Class() == PPARAMOUT {
			// Make sure the pointer to the heap copy is kept live throughout the function.
			// The function could panic at any point, and then a defer could recover.
			// Thus, we need the pointer to the heap copy always available so the
			// post-deferreturn code can copy the return value back to the stack.
			// See issue 16095.
			heapaddr.SetIsOutputParamHeapAddr(true)
		}
		n.Name.Param.Stackcopy = stackcopy

		// Substitute the stackcopy into the function variable list so that
		// liveness and other analyses use the underlying stack slot
		// and not the now-pseudo-variable n.
		found := false
		for i, d := range Curfn.Func.Dcl {
			if d == n {
				Curfn.Func.Dcl[i] = stackcopy
				found = true
				break
			}
			// Parameters are before locals, so can stop early.
			// This limits the search even in functions with many local variables.
			if d.Class() == PAUTO {
				break
			}
		}
		if !found {
			Fatalf("cannot find %v in local variable list", n)
		}
		Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
	}

	// Modify n in place so that uses of n now mean indirection of the heapaddr.
	n.SetClass(PAUTOHEAP)
	n.Xoffset = 0
	n.Name.Param.Heapaddr = heapaddr
	n.Esc = EscHeap
	if Debug['m'] != 0 {
		fmt.Printf("%v: moved to heap: %v\n", n.Line(), n)
	}
}

// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
// It is logically a constant, but using a var
// lets us take the address below to get a *string.
var unsafeUintptrTag = "unsafe-uintptr"

// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"

func (e *EscState) esctag(fn *Node) {
	fn.Esc = EscFuncTagged

	name := func(s *types.Sym, narg int) string {
		if s != nil {
			return s.Name
		}
		return fmt.Sprintf("arg#%d", narg)
	}

	// External functions are assumed unsafe,
	// unless //go:noescape is given before the declaration.
	if fn.Nbody.Len() == 0 {
		if fn.Noescape() {
			for _, f := range fn.Type.Params().Fields().Slice() {
				if types.Haspointers(f.Type) {
					f.Note = mktag(EscNone)
				}
			}
		}
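
		// For illustration of the branch above (hypothetical declaration, not
		// from this file): it applies to functions without bodies, typically
		// assembly-backed. A declaration such as
		//
		//	//go:noescape
		//	func sumArray(p *[8]int) int
		//
		// gets its pointer parameter tagged EscNone, so callers may keep the
		// array on the stack; without the pragma the parameter is assumed to
		// escape.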

		// Assume that uintptr arguments must be held live across the call.
		// This is most important for syscall.Syscall.
		// See golang.org/issue/13372.
		// This really doesn't have much to do with escape analysis per se,
		// but we are reusing the ability to annotate an individual function
		// argument and pass those annotations along to importing code.
		narg := 0
		for _, f := range fn.Type.Params().Fields().Slice() {
			narg++
			if f.Type.Etype == TUINTPTR {
				if Debug['m'] != 0 {
					Warnl(fn.Pos, "%v assuming %v is unsafe uintptr", funcSym(fn), name(f.Sym, narg))
				}
				f.Note = unsafeUintptrTag
			}
		}

		return
	}

	if fn.Func.Pragma&UintptrEscapes != 0 {
		narg := 0
		for _, f := range fn.Type.Params().Fields().Slice() {
			narg++
			if f.Type.Etype == TUINTPTR {
				if Debug['m'] != 0 {
					Warnl(fn.Pos, "%v marking %v as escaping uintptr", funcSym(fn), name(f.Sym, narg))
				}
				f.Note = uintptrEscapesTag
			}

			if f.Isddd() && f.Type.Elem().Etype == TUINTPTR {
				// final argument is ...uintptr.
				if Debug['m'] != 0 {
					Warnl(fn.Pos, "%v marking %v as escaping ...uintptr", funcSym(fn), name(f.Sym, narg))
				}
				f.Note = uintptrEscapesTag
			}
		}
	}

	for _, ln := range fn.Func.Dcl {
		if ln.Op != ONAME {
			continue
		}

		switch ln.Esc & EscMask {
		case EscNone, // not touched by escflood
			EscReturn:
			if types.Haspointers(ln.Type) { // don't bother tagging for scalars
				if ln.Name.Param.Field.Note != uintptrEscapesTag {
					ln.Name.Param.Field.Note = mktag(int(ln.Esc))
				}
			}

		case EscHeap: // touched by escflood, moved to heap
		}
	}

	// Unnamed parameters are unused and therefore do not escape.
	// (Unnamed parameters are not in the Dcl list in the loop above
	// so we need to mark them separately.)
	for _, f := range fn.Type.Params().Fields().Slice() {
		if f.Sym == nil || f.Sym.IsBlank() {
			f.Note = mktag(EscNone)
		}
	}
}
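
// For illustration of the UintptrEscapes branch in esctag above (hypothetical
// declaration, not from this file): with the //go:uintptrescapes directive,
// each uintptr parameter, including a final ...uintptr, is tagged with
// uintptrEscapesTag, so pointers converted to uintptr at a call site are
// treated as escaping and kept alive across the call.
//
//	//go:uintptrescapes
//	func doSyscall(trap uintptr, args ...uintptr) (r1, r2 uintptr, err error)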