github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/cmd/compile/internal/gc/esc.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"fmt"
	"strconv"
	"strings"
)

// Run analysis on minimal sets of mutually recursive functions
// or single non-recursive functions, bottom up.
//
// Finding these sets is finding strongly connected components
// by reverse topological order in the static call graph.
// The algorithm (known as Tarjan's algorithm) for doing that is taken from
// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
//
// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
// root of a connected component. Refusing to use it as a root
// forces it into the component of the function in which it appears.
// This is more convenient for escape analysis.
//
// Second, each function becomes two virtual nodes in the graph,
// with numbers n and n+1. We record the function's node number as n
// but search from node n+1. If the search tells us that the component
// number (min) is n+1, we know that this is a trivial component: one function
// plus its closures. If the search tells us that the component number is
// n, then there was a path from node n+1 back to node n, meaning that
// the function set is mutually recursive. The escape analysis can be
// more precise when analyzing a single non-recursive function than
// when analyzing a set of mutually recursive functions.

type bottomUpVisitor struct {
	analyze  func([]*Node, bool)
	visitgen uint32
	nodeID   map[*Node]uint32
	stack    []*Node
}

// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
// on the list or functions that have been passed in previous invocations of
// analyze. Closures appear in the same list as their outer functions.
// The lists are as short as possible while preserving those requirements.
// (In a typical program, many invocations of analyze will be passed just
// a single function.) The boolean argument 'recursive' passed to analyze
// specifies whether the functions on the list are mutually recursive.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
	var v bottomUpVisitor
	v.analyze = analyze
	v.nodeID = make(map[*Node]uint32)
	for _, n := range list {
		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
			v.visit(n)
		}
	}
}

func (v *bottomUpVisitor) visit(n *Node) uint32 {
	if id := v.nodeID[n]; id > 0 {
		// already visited
		return id
	}

	v.visitgen++
	id := v.visitgen
	v.nodeID[n] = id
	v.visitgen++
	min := v.visitgen

	v.stack = append(v.stack, n)
	min = v.visitcodelist(n.Nbody, min)
	if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
		// This node is the root of a strongly connected component.
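		// Illustrative trace (not taken from a real compilation, f and g are
		// hypothetical functions): if f calls g and g calls back into f, then
		// visit(f) records id(f)=1 and searches with min=2; visit(g) records
		// id(g)=3 and searches with min=4; g's call back to f returns the
		// already-recorded 1, which propagates up so that min == id(f) here,
		// and {f, g} is popped below as a single recursive component. A
		// function with no path back to itself instead arrives here with
		// min == id+1.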

		// The original min passed to visitcodelist was v.nodeID[n]+1.
		// If visitcodelist found its way back to v.nodeID[n], then this
		// block is a set of mutually recursive functions.
		// Otherwise it's just a lone function that does not recurse.
		recursive := min == id

		// Remove connected component from stack.
		// Mark walkgen so that future visits return a large number
		// so as not to affect the caller's min.

		var i int
		for i = len(v.stack) - 1; i >= 0; i-- {
			x := v.stack[i]
			if x == n {
				break
			}
			v.nodeID[x] = ^uint32(0)
		}
		v.nodeID[n] = ^uint32(0)
		block := v.stack[i:]
		// Run escape analysis on this set of functions.
		v.stack = v.stack[:i]
		v.analyze(block, recursive)
	}

	return min
}

func (v *bottomUpVisitor) visitcodelist(l Nodes, min uint32) uint32 {
	for _, n := range l.Slice() {
		min = v.visitcode(n, min)
	}
	return min
}

func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
	if n == nil {
		return min
	}

	min = v.visitcodelist(n.Ninit, min)
	min = v.visitcode(n.Left, min)
	min = v.visitcode(n.Right, min)
	min = v.visitcodelist(n.List, min)
	min = v.visitcodelist(n.Nbody, min)
	min = v.visitcodelist(n.Rlist, min)

	switch n.Op {
	case OCALLFUNC, OCALLMETH:
		fn := asNode(n.Left.Type.Nname())
		if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
			m := v.visit(fn.Name.Defn)
			if m < min {
				min = m
			}
		}

	case OCLOSURE:
		m := v.visit(n.Func.Closure)
		if m < min {
			min = m
		}
	}

	return min
}

// Escape analysis.

// An escape analysis pass for a set of functions.
// The analysis assumes that closures and the functions in which they
// appear are analyzed together, so that the aliasing between their
// variables can be modeled more precisely.
//
// First escfunc, esc and escassign recurse over the ast of each
// function to dig out flow(dst,src) edges between any
// pointer-containing nodes and store them in e.nodeEscState(dst).Flowsrc. For
// variables assigned to a variable in an outer scope or used as a
// return value, they store a flow(theSink, src) edge to a fake node
// 'the Sink'. For variables referenced in closures, an edge
// flow(closure, &var) is recorded and the flow of a closure itself to
// an outer scope is tracked the same way as other variables.
//
// Then escflood walks the graph starting at theSink and tags all
// variables it can reach through an & node as escaping and all function
// parameters it can reach as leaking.
//
// If a value's address is taken but the address does not escape,
// then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals.
//
// If optimizations are disabled (-N), this code is not used.
// Instead, the compiler assumes that any value whose address
// is taken without being immediately dereferenced
// needs to be moved to the heap, and new(T) and slice
// literals are always real allocations.
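//
// As a purely illustrative example (the functions below are hypothetical,
// not part of the compiler), the analysis described above is expected to
// reach these verdicts:
//
//	func f() *int {
//		p := new(int) // p flows to the return value and hence to theSink,
//		return p      // so this new(int) is heap allocated.
//	}
//
//	func g() int {
//		q := new(int) // no &-edge from q ever reaches theSink, so this
//		*q = 1        // new(int) can be rewritten into a stack allocation.
//		return *q
//	}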

func escapes(all []*Node) {
	visitBottomUp(all, escAnalyze)
}

const (
	EscFuncUnknown = 0 + iota
	EscFuncPlanned
	EscFuncStarted
	EscFuncTagged
)

// There appear to be some loops in the escape graph, causing
// arbitrary recursion into deeper and deeper levels.
// Cut this off safely by making minLevel sticky: once you
// get that deep, you cannot go down any further but you also
// cannot go up any further. This is a conservative fix.
// Making minLevel smaller (more negative) would handle more
// complex chains of indirections followed by address-of operations,
// at the cost of repeating the traversal once for each additional
// allowed level when a loop is encountered. Using -2 suffices to
// pass all the tests we have written so far, which we assume matches
// the level of complexity we want the escape analysis code to handle.
const MinLevel = -2

// A Level encodes the reference state and context applied to
// (stack, heap) allocated memory.
//
// value is the overall sum of *(1) and &(-1) operations encountered
// along a path from a destination (sink, return value) to a source
// (allocation, parameter).
//
// suffixValue is the maximum-copy-started-suffix-level applied to a sink.
// For example:
// sink = x.left.left --> level=2, x is dereferenced twice and does not escape to sink.
// sink = &Node{x} --> level=-1, x is accessible from sink via one "address of"
// sink = &Node{&Node{x}} --> level=-2, x is accessible from sink via two "address of"
// sink = &Node{&Node{x.left}} --> level=-1, but x is NOT accessible from sink because it was indirected and then copied.
// (The copy operations are sometimes implicit in the source code; in this case,
// value of x.left was copied into a field of a newly allocated Node)
//
// There's one of these for each Node, and the integer values
// rarely exceed even what can be stored in 4 bits, never mind 8.
type Level struct {
	value, suffixValue int8
}

func (l Level) int() int {
	return int(l.value)
}

func levelFrom(i int) Level {
	if i <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: int8(i)}
}

func satInc8(x int8) int8 {
	if x == 127 {
		return 127
	}
	return x + 1
}

func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}

// inc returns the level l + 1, representing the effect of an indirect (*) operation.
func (l Level) inc() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: satInc8(l.value), suffixValue: satInc8(l.suffixValue)}
}

// dec returns the level l - 1, representing the effect of an address-of (&) operation.
func (l Level) dec() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
}

// copy returns the level for a copy of a value with level l.
func (l Level) copy() Level {
	return Level{value: l.value, suffixValue: max8(l.suffixValue, 0)}
}

func (l1 Level) min(l2 Level) Level {
	return Level{
		value:       min8(l1.value, l2.value),
		suffixValue: min8(l1.suffixValue, l2.suffixValue)}
}

// guaranteedDereference returns the number of dereferences
// applied to a pointer before addresses are taken/generated.
// This is the maximum level computed from path suffixes starting
// with copies where paths flow from destination to source.
func (l Level) guaranteedDereference() int {
	return int(l.suffixValue)
}

// An EscStep documents one step in the path from memory
// that is heap allocated to the (alleged) reason for the
// heap allocation.
type EscStep struct {
	src, dst *Node    // the endpoints of this edge in the escape-to-heap chain.
	where    *Node    // sometimes the endpoints don't match source locations; set 'where' to make that right
	parent   *EscStep // used in flood to record path
	why      string   // explanation for this step in the escape-to-heap chain
	busy     bool     // used in prevent to snip cycles.
}

type NodeEscState struct {
	Curfn             *Node
	Flowsrc           []EscStep // flow(this, src)
	Retval            Nodes     // on OCALLxxx, list of dummy return values
	Loopdepth         int32     // -1: global, 0: return variables, 1: function top level, increased inside function for every loop or label to mark scopes
	Level             Level
	Walkgen           uint32
	Maxextraloopdepth int32
}

func (e *EscState) nodeEscState(n *Node) *NodeEscState {
	if nE, ok := n.Opt().(*NodeEscState); ok {
		return nE
	}
	if n.Opt() != nil {
		Fatalf("nodeEscState: opt in use (%T)", n.Opt())
	}
	nE := &NodeEscState{
		Curfn: Curfn,
	}
	n.SetOpt(nE)
	e.opts = append(e.opts, n)
	return nE
}

func (e *EscState) track(n *Node) {
	if Curfn == nil {
		Fatalf("EscState.track: Curfn nil")
	}
	n.Esc = EscNone // until proven otherwise
	nE := e.nodeEscState(n)
	nE.Loopdepth = e.loopdepth
	e.noesc = append(e.noesc, n)
}

// Escape constants are numbered in order of increasing "escapiness"
// to help make inferences be monotonic. With the exception of
// EscNever which is sticky, eX < eY means that eY is more exposed
// than eX, and hence replaces it in a conservative analysis.
const (
	EscUnknown        = iota
	EscNone           // Does not escape to heap, result, or parameters.
	EscReturn         // Is returned or reachable from returned.
	EscHeap           // Reachable from the heap
	EscNever          // By construction will not escape.
	EscBits           = 3
	EscMask           = (1 << EscBits) - 1
	EscContentEscapes = 1 << EscBits // value obtained by indirect of parameter escapes to heap
	EscReturnBits     = EscBits + 1
	// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
)

// escMax returns the maximum of an existing escape value
// (and its additional parameter flow flags) and a new escape type.
func escMax(e, etype uint16) uint16 {
	if e&EscMask >= EscHeap {
		// normalize
		if e&^EscMask != 0 {
			Fatalf("Escape information had unexpected return encoding bits (w/ EscHeap, EscNever), e&EscMask=%v", e&EscMask)
		}
	}
	if e&EscMask > etype {
		return e
	}
	if etype == EscNone || etype == EscReturn {
		return (e &^ EscMask) | etype
	}
	return etype
}

// For each input parameter to a function, the escapeReturnEncoding describes
// how the parameter may leak to the function's outputs. This is currently the
// "level" of the leak where level is 0 or larger (negative level means stored into
// something whose address is returned -- but that implies stored into the heap,
// hence EscHeap, which means that the details are not currently relevant.)
const (
	bitsPerOutputInTag = 3                                 // For each output, the number of bits for a tag
	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
	maxEncodedLevel    = int(bitsMaskForTag - 1)           // The largest level that can be stored in a tag.
)

type EscState struct {
	// Fake node that all
	//   - return values and output variables
	//   - parameters on imported functions not marked 'safe'
	//   - assignments to global variables
	// flow to.
	theSink Node

	dsts      []*Node // all dst nodes
	loopdepth int32   // for detecting nested loop scopes
	pdepth    int     // for debug printing in recursions.
	dstcount  int     // diagnostic
	edgecount int     // diagnostic
	noesc     []*Node // list of possible non-escaping nodes, for printing
	recursive bool    // recursive function or group of mutually recursive functions.
	opts      []*Node // nodes with .Opt initialized
	walkgen   uint32
}

func newEscState(recursive bool) *EscState {
	e := new(EscState)
	e.theSink.Op = ONAME
	e.theSink.Orig = &e.theSink
	e.theSink.SetClass(PEXTERN)
	e.theSink.Sym = lookup(".sink")
	e.nodeEscState(&e.theSink).Loopdepth = -1
	e.recursive = recursive
	return e
}

func (e *EscState) stepWalk(dst, src *Node, why string, parent *EscStep) *EscStep {
	// TODO: keep a cache of these, mark entry/exit in escwalk to avoid allocation
	// Or perhaps never mind, since it is disabled unless printing is on.
	// We may want to revisit this, since the EscStep nodes would make
	// an excellent replacement for the poorly-separated graph-build/graph-flood
	// stages.
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, parent: parent}
}

func (e *EscState) stepAssign(step *EscStep, dst, src *Node, why string) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	if step != nil { // Caller may have known better.
		if step.why == "" {
			step.why = why
		}
		if step.dst == nil {
			step.dst = dst
		}
		if step.src == nil {
			step.src = src
		}
		return step
	}
	return &EscStep{src: src, dst: dst, why: why}
}

func (e *EscState) stepAssignWhere(dst, src *Node, why string, where *Node) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, where: where}
}

// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
	if fn == nil || fn.Func.Nname == nil {
		return nil
	}
	return fn.Func.Nname.Sym
}

// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
func (e *EscState) curfnSym(n *Node) *types.Sym {
	nE := e.nodeEscState(n)
	return funcSym(nE.Curfn)
}

func escAnalyze(all []*Node, recursive bool) {
	e := newEscState(recursive)

	for _, n := range all {
		if n.Op == ODCLFUNC {
			n.Esc = EscFuncPlanned
			if Debug['m'] > 3 {
				Dump("escAnalyze", n)
			}
		}
	}

	// flow-analyze functions
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.escfunc(n)
		}
	}

	// print("escapes: %d e.dsts, %d edges\n", e.dstcount, e.edgecount);

	// visit the upstream of each dst, mark address nodes with
	// addrescapes, mark parameters unsafe
	escapes := make([]uint16, len(e.dsts))
	for i, n := range e.dsts {
		escapes[i] = n.Esc
	}
	for _, n := range e.dsts {
		e.escflood(n)
	}
	for {
		done := true
		for i, n := range e.dsts {
			if n.Esc != escapes[i] {
				done = false
				if Debug['m'] > 2 {
					Warnl(n.Pos, "Reflooding %v %S", e.curfnSym(n), n)
				}
				escapes[i] = n.Esc
				e.escflood(n)
			}
		}
		if done {
			break
		}
	}

	// for all top level functions, tag the typenodes corresponding to the param nodes
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.esctag(n)
		}
	}

	if Debug['m'] != 0 {
		for _, n := range e.noesc {
			if n.Esc == EscNone {
				Warnl(n.Pos, "%v %S does not escape", e.curfnSym(n), n)
			}
		}
	}

	for _, x := range e.opts {
		x.SetOpt(nil)
	}
}

func (e *EscState) escfunc(fn *Node) {
	// print("escfunc %N %s\n", fn.Func.Nname, e.recursive?"(recursive)":"");
	if fn.Esc != EscFuncPlanned {
		Fatalf("repeat escfunc %v", fn.Func.Nname)
	}
	fn.Esc = EscFuncStarted

	saveld := e.loopdepth
	e.loopdepth = 1
	savefn := Curfn
	Curfn = fn

	for _, ln := range Curfn.Func.Dcl {
		if ln.Op != ONAME {
			continue
		}
		lnE := e.nodeEscState(ln)
		switch ln.Class() {
		// out params are in a loopdepth between the sink and all local variables
		case PPARAMOUT:
			lnE.Loopdepth = 0

		case PPARAM:
			lnE.Loopdepth = 1
			if ln.Type != nil && !types.Haspointers(ln.Type) {
				break
			}
			if Curfn.Nbody.Len() == 0 && !Curfn.Noescape() {
				ln.Esc = EscHeap
			} else {
				ln.Esc = EscNone // prime for escflood later
			}
			e.noesc = append(e.noesc, ln)
		}
	}

	// in a mutually recursive group we lose track of the return values
	if e.recursive {
		for _, ln := range Curfn.Func.Dcl {
			if ln.Op == ONAME && ln.Class() == PPARAMOUT {
				e.escflows(&e.theSink, ln, e.stepAssign(nil, ln, ln, "returned from recursive function"))
			}
		}
	}

	e.escloopdepthlist(Curfn.Nbody)
	e.esclist(Curfn.Nbody, Curfn)
	Curfn = savefn
	e.loopdepth = saveld
}

// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
	looping    Node
	nonlooping Node
)

func (e *EscState) escloopdepthlist(l Nodes) {
	for _, n := range l.Slice() {
		e.escloopdepth(n)
	}
}

func (e *EscState) escloopdepth(n *Node) {
	if n == nil {
		return
	}

	e.escloopdepthlist(n.Ninit)

	switch n.Op {
	case OLABEL:
		if n.Left == nil || n.Left.Sym == nil {
			Fatalf("esc:label without label: %+v", n)
		}

		// Walk will complain about this label being already defined, but that's not until
		// after escape analysis. in the future, maybe pull label & goto analysis out of walk and put before esc
		// if(n.Left.Sym.Label != nil)
		//	fatal("escape analysis messed up analyzing label: %+N", n);
		n.Left.Sym.Label = asTypesNode(&nonlooping)

	case OGOTO:
		if n.Left == nil || n.Left.Sym == nil {
			Fatalf("esc:goto without label: %+v", n)
		}

		// If we come past one that's uninitialized, this must be a (harmless) forward jump
		// but if it's set to nonlooping the label must have preceded this goto.
		if asNode(n.Left.Sym.Label) == &nonlooping {
			n.Left.Sym.Label = asTypesNode(&looping)
		}
	}

	e.escloopdepth(n.Left)
	e.escloopdepth(n.Right)
	e.escloopdepthlist(n.List)
	e.escloopdepthlist(n.Nbody)
	e.escloopdepthlist(n.Rlist)
}

func (e *EscState) esclist(l Nodes, parent *Node) {
	for _, n := range l.Slice() {
		e.esc(n, parent)
	}
}

func (e *EscState) esc(n *Node, parent *Node) {
	if n == nil {
		return
	}

	lno := setlineno(n)

	// ninit logically runs at a different loopdepth than the rest of the for loop.
	e.esclist(n.Ninit, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth++
	}

	// type switch variables have no ODCL.
	// process type switch as declaration.
	// must happen before processing of switch body,
	// so before recursion.
	if n.Op == OSWITCH && n.Left != nil && n.Left.Op == OTYPESW {
		for _, cas := range n.List.Slice() { // cases
			// it.N().Rlist is the variable per case
			if cas.Rlist.Len() != 0 {
				e.nodeEscState(cas.Rlist.First()).Loopdepth = e.loopdepth
			}
		}
	}

	// Big stuff escapes unconditionally
	// "Big" conditions that were scattered around in walk have been gathered here
	if n.Esc != EscHeap && n.Type != nil &&
		(n.Type.Width > maxStackVarSize ||
			(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 ||
			n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
		if Debug['m'] > 2 {
			Warnl(n.Pos, "%v is too large for stack", n)
		}
		n.Esc = EscHeap
		addrescapes(n)
		e.escassignSinkWhy(n, n, "too large for stack") // TODO category: tooLarge
	}

	e.esc(n.Left, n)
	e.esc(n.Right, n)
	e.esclist(n.Nbody, n)
	e.esclist(n.List, n)
	e.esclist(n.Rlist, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth--
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v esc: %v\n", linestr(lineno), e.loopdepth, funcSym(Curfn), n)
	}

	switch n.Op {
	// Record loop depth at declaration.
	case ODCL:
		if n.Left != nil {
			e.nodeEscState(n.Left).Loopdepth = e.loopdepth
		}

	case OLABEL:
		if asNode(n.Left.Sym.Label) == &nonlooping {
			if Debug['m'] > 2 {
				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
			}
		} else if asNode(n.Left.Sym.Label) == &looping {
			if Debug['m'] > 2 {
				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
			}
			e.loopdepth++
		}

		// See case OLABEL in escloopdepth above
		// else if(n.Left.Sym.Label == nil)
		//	fatal("escape analysis missed or messed up a label: %+N", n);

		n.Left.Sym.Label = nil

	case ORANGE:
		if n.List.Len() >= 2 {
			// Everything but fixed array is a dereference.

			// If fixed array is really the address of fixed array,
			// it is also a dereference, because it is implicitly
			// dereferenced (see #12588)
			if n.Type.IsArray() &&
				!(n.Right.Type.IsPtr() && eqtype(n.Right.Type.Elem(), n.Type)) {
				e.escassignWhyWhere(n.List.Second(), n.Right, "range", n)
			} else {
				e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n))
			}
		}

	case OSWITCH:
		if n.Left != nil && n.Left.Op == OTYPESW {
			for _, cas := range n.List.Slice() {
				// cases
				// n.Left.Right is the argument of the .(type),
				// it.N().Rlist is the variable per case
				if cas.Rlist.Len() != 0 {
					e.escassignWhyWhere(cas.Rlist.First(), n.Left.Right, "switch case", n)
				}
			}
		}

	// Filter out the following special case.
	//
	//	func (b *Buffer) Foo() {
	//		n, m := ...
	//		b.buf = b.buf[n:m]
	//	}
	//
	// This assignment is a no-op for escape analysis,
	// it does not store any new pointers into b that were not already there.
	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
	case OAS, OASOP:
		if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
			(n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
			(n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
			n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME

			// Here we also assume that the statement will not contain calls,
			// that is, that order will move any calls to init.
			// Otherwise base ONAME value could change between the moments
			// when we evaluate it for dst and for src.
			//
			// Note, this optimization does not apply to OSLICEARR,
			// because it does introduce a new pointer into b that was not already there
			// (pointer to b itself). After such assignment, if b contents escape,
			// b escapes as well. If we ignore such OSLICEARR, we will conclude
			// that b does not escape when b contents do.
			if Debug['m'] != 0 {
				Warnl(n.Pos, "%v ignoring self-assignment to %S", e.curfnSym(n), n.Left)
			}

			break
		}

		e.escassign(n.Left, n.Right, e.stepAssignWhere(nil, nil, "", n))

	case OAS2: // x,y = a,b
		if n.List.Len() == n.Rlist.Len() {
			rs := n.Rlist.Slice()
			for i, n := range n.List.Slice() {
				e.escassignWhyWhere(n, rs[i], "assign-pair", n)
			}
		}

	case OAS2RECV: // v, ok = <-ch
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-receive", n)
	case OAS2MAPR: // v, ok = m[k]
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-mapr", n)
	case OAS2DOTTYPE: // v, ok = x.(type)
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-dot-type", n)

	case OSEND: // ch <- x
		e.escassignSinkWhy(n, n.Right, "send")

	case ODEFER:
		if e.loopdepth == 1 { // top level
			break
		}
		// arguments leak out of scope
		// TODO: leak to a dummy node instead
		// defer f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "defer func")
		e.escassignSinkWhy(n, n.Left.Right, "defer func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "defer func arg")
		}

	case OPROC:
		// go f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "go func")
		e.escassignSinkWhy(n, n.Left.Right, "go func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "go func arg")
		}

	case OCALLMETH, OCALLFUNC, OCALLINTER:
		e.esccall(n, parent)

	// esccall already done on n.Rlist.First(). tie its Retval to n.List
	case OAS2FUNC: // x,y = f()
		rs := e.nodeEscState(n.Rlist.First()).Retval.Slice()
		for i, n := range n.List.Slice() {
			if i >= len(rs) {
				break
			}
			e.escassignWhyWhere(n, rs[i], "assign-pair-func-call", n)
		}
		if n.List.Len() != len(rs) {
			Fatalf("esc oas2func")
		}

	case ORETURN:
		retList := n.List
		if retList.Len() == 1 && Curfn.Type.NumResults() > 1 {
			// OAS2FUNC in disguise
			// esccall already done on n.List.First()
			// tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's
			retList = e.nodeEscState(n.List.First()).Retval
		}

		i := 0
		for _, lrn := range Curfn.Func.Dcl {
			if i >= retList.Len() {
				break
			}
			if lrn.Op != ONAME || lrn.Class() != PPARAMOUT {
				continue
			}
			e.escassignWhyWhere(lrn, retList.Index(i), "return", n)
			i++
		}

		if i < retList.Len() {
			Fatalf("esc return list")
		}

	// Argument could leak through recover.
	case OPANIC:
		e.escassignSinkWhy(n, n.Left, "panic")

	case OAPPEND:
		if !n.Isddd() {
			for _, nn := range n.List.Slice()[1:] {
				e.escassignSinkWhy(n, nn, "appended to slice") // lose track of assign to dereference
			}
		} else {
			// append(slice1, slice2...) -- slice2 itself does not escape, but contents do.
			slice2 := n.List.Second()
			e.escassignDereference(&e.theSink, slice2, e.stepAssignWhere(n, slice2, "appended slice...", n)) // lose track of assign of dereference
			if Debug['m'] > 3 {
				Warnl(n.Pos, "%v special treatment of append(slice1, slice2...) %S", e.curfnSym(n), n)
			}
		}
		e.escassignDereference(&e.theSink, n.List.First(), e.stepAssignWhere(n, n.List.First(), "appendee slice", n)) // The original elements are now leaked, too

	case OCOPY:
		e.escassignDereference(&e.theSink, n.Right, e.stepAssignWhere(n, n.Right, "copied slice", n)) // lose track of assign of dereference

	case OCONV, OCONVNOP:
		e.escassignWhyWhere(n, n.Left, "converted", n)

	case OCONVIFACE:
		e.track(n)
		e.escassignWhyWhere(n, n.Left, "interface-converted", n)

	case OARRAYLIT:
		// Link values to array
		for _, elt := range n.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.escassign(n, elt, e.stepAssignWhere(n, elt, "array literal element", n))
		}

	case OSLICELIT:
		// Slice is not leaked until proven otherwise
		e.track(n)
		// Link values to slice
		for _, elt := range n.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.escassign(n, elt, e.stepAssignWhere(n, elt, "slice literal element", n))
		}

	// Link values to struct.
	case OSTRUCTLIT:
		for _, elt := range n.List.Slice() {
			e.escassignWhyWhere(n, elt.Left, "struct literal element", n)
		}

	case OPTRLIT:
		e.track(n)

		// Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too.
		e.escassignWhyWhere(n, n.Left, "pointer literal [assign]", n)

	case OCALLPART:
		e.track(n)

		// Contents make it to memory, lose track.
		e.escassignSinkWhy(n, n.Left, "call part")

	case OMAPLIT:
		e.track(n)
		// Keys and values make it to memory, lose track.
		for _, elt := range n.List.Slice() {
			e.escassignSinkWhy(n, elt.Left, "map literal key")
			e.escassignSinkWhy(n, elt.Right, "map literal value")
		}

	case OCLOSURE:
		// Link addresses of captured variables to closure.
		for _, v := range n.Func.Cvars.Slice() {
			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
				continue
			}
			a := v.Name.Defn
			if !v.Name.Byval() {
				a = nod(OADDR, a, nil)
				a.Pos = v.Pos
				e.nodeEscState(a).Loopdepth = e.loopdepth
				a = typecheck(a, Erv)
			}

			e.escassignWhyWhere(n, a, "captured by a closure", n)
		}
		fallthrough

	case OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		ONEW,
		OARRAYRUNESTR,
		OARRAYBYTESTR,
		OSTRARRAYRUNE,
		OSTRARRAYBYTE,
		ORUNESTR:
		e.track(n)

	case OADDSTR:
		e.track(n)
		// Arguments of OADDSTR do not escape.

	case OADDR:
		// current loop depth is an upper bound on actual loop depth
		// of addressed value.
		e.track(n)

		// for &x, use loop depth of x if known.
		// it should always be known, but if not, be conservative
		// and keep the current loop depth.
		if n.Left.Op == ONAME {
			switch n.Left.Class() {
			case PAUTO:
				nE := e.nodeEscState(n)
				leftE := e.nodeEscState(n.Left)
				if leftE.Loopdepth != 0 {
					nE.Loopdepth = leftE.Loopdepth
				}

			// PPARAM is loop depth 1 always.
			// PPARAMOUT is loop depth 0 for writes
			// but considered loop depth 1 for address-of,
			// so that writing the address of one result
			// to another (or the same) result makes the
			// first result move to the heap.
			case PPARAM, PPARAMOUT:
				nE := e.nodeEscState(n)
				nE.Loopdepth = 1
			}
		}
	}

	lineno = lno
}

// escassignWhyWhere bundles a common case of
// escassign(e, dst, src, e.stepAssignWhere(dst, src, reason, where))
func (e *EscState) escassignWhyWhere(dst, src *Node, reason string, where *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, where)
	}
	e.escassign(dst, src, step)
}

// escassignSinkWhy bundles a common case of
// escassign(e, &e.theSink, src, e.stepAssign(nil, dst, src, reason))
func (e *EscState) escassignSinkWhy(dst, src *Node, reason string) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssign(nil, dst, src, reason)
	}
	e.escassign(&e.theSink, src, step)
}

// escassignSinkWhyWhere is escassignSinkWhy but includes a call site
// for accurate location reporting.
func (e *EscState) escassignSinkWhyWhere(dst, src *Node, reason string, call *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, call)
	}
	e.escassign(&e.theSink, src, step)
}

// Assert that expr somehow gets assigned to dst, if non-nil. For
// dst==nil, any name node expr still must be marked as being
// evaluated in curfn. For expr==nil, dst must still be examined for
// evaluations inside it (e.g. *f(x) = y).
func (e *EscState) escassign(dst, src *Node, step *EscStep) {
	if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
		return
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v escassign: %S(%0j)[%v] = %S(%0j)[%v]\n",
			linestr(lineno), e.loopdepth, funcSym(Curfn),
			dst, dst, dst.Op,
			src, src, src.Op)
	}

	setlineno(dst)

	originalDst := dst
	dstwhy := "assigned"

	// Analyze lhs of assignment.
	// Replace dst with &e.theSink if we can't track it.
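	// For instance (illustrative only): in *p = q the destination is an OIND
	// node we cannot track, so q is treated as flowing to the sink via the
	// "star-equals" case below.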
	switch dst.Op {
	default:
		Dump("dst", dst)
		Fatalf("escassign: unexpected dst")

	case OARRAYLIT,
		OSLICELIT,
		OCLOSURE,
		OCONV,
		OCONVIFACE,
		OCONVNOP,
		OMAPLIT,
		OSTRUCTLIT,
		OPTRLIT,
		ODDDARG,
		OCALLPART:

	case ONAME:
		if dst.Class() == PEXTERN {
			dstwhy = "assigned to top level variable"
			dst = &e.theSink
		}

	case ODOT: // treat "dst.x = src" as "dst = src"
		e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "dot-equals"))
		return

	case OINDEX:
		if dst.Left.Type.IsArray() {
			e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "array-element-equals"))
			return
		}

		dstwhy = "slice-element-equals"
		dst = &e.theSink // lose track of dereference

	case OIND:
		dstwhy = "star-equals"
		dst = &e.theSink // lose track of dereference

	case ODOTPTR:
		dstwhy = "star-dot-equals"
		dst = &e.theSink // lose track of dereference

	// lose track of key and value
	case OINDEXMAP:
		e.escassign(&e.theSink, dst.Right, e.stepAssign(nil, originalDst, src, "key of map put"))
		dstwhy = "value of map put"
		dst = &e.theSink
	}

	lno := setlineno(src)
	e.pdepth++

	switch src.Op {
	case OADDR, // dst = &x
		OIND,    // dst = *x
		ODOTPTR, // dst = (*x).f
		ONAME,
		ODDDARG,
		OPTRLIT,
		OARRAYLIT,
		OSLICELIT,
		OMAPLIT,
		OSTRUCTLIT,
		OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		OARRAYRUNESTR,
		OARRAYBYTESTR,
		OSTRARRAYRUNE,
		OSTRARRAYBYTE,
		OADDSTR,
		ONEW,
		OCALLPART,
		ORUNESTR,
		OCONVIFACE:
		e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))

	case OCLOSURE:
		// OCLOSURE is lowered to OPTRLIT,
		// insert OADDR to account for the additional indirection.
		a := nod(OADDR, src, nil)
		a.Pos = src.Pos
		e.nodeEscState(a).Loopdepth = e.nodeEscState(src).Loopdepth
		a.Type = types.NewPtr(src.Type)
		e.escflows(dst, a, e.stepAssign(nil, originalDst, src, dstwhy))

	// Flowing multiple returns to a single dst happens when
	// analyzing "go f(g())": here g() flows to sink (issue 4529).
	case OCALLMETH, OCALLFUNC, OCALLINTER:
		for _, n := range e.nodeEscState(src).Retval.Slice() {
			e.escflows(dst, n, e.stepAssign(nil, originalDst, n, dstwhy))
		}

	// A non-pointer escaping from a struct does not concern us.
	case ODOT:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		fallthrough

	// Conversions, field access, slice all preserve the input value.
	case OCONV,
		OCONVNOP,
		ODOTMETH,
		// treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
		OSLICE,
		OSLICE3,
		OSLICEARR,
		OSLICE3ARR,
		OSLICESTR:
		// Conversions, field access, slice all preserve the input value.
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case ODOTTYPE,
		ODOTTYPE2:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case OAPPEND:
		// Append returns first argument.
		// Subsequent arguments are already leaked because they are operands to append.
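		// For example (illustrative only): in s2 := append(s1, p) the result may
		// share s1's backing array, so s1 flows to s2 here; p was already leaked
		// when the OAPPEND node itself was visited in esc.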
		e.escassign(dst, src.List.First(), e.stepAssign(step, dst, src.List.First(), dstwhy))

	case OINDEX:
		// Index of array preserves input value.
		if src.Left.Type.IsArray() {
			e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
		} else {
			e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))
		}

	// Might be pointer arithmetic, in which case
	// the operands flow into the result.
	// TODO(rsc): Decide what the story is here. This is unsettling.
	case OADD,
		OSUB,
		OOR,
		OXOR,
		OMUL,
		ODIV,
		OMOD,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		OPLUS,
		OMINUS,
		OCOM:
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

		e.escassign(dst, src.Right, e.stepAssign(step, originalDst, src, dstwhy))
	}

	e.pdepth--
	lineno = lno
}

// Common case for escapes is 16 bits 000000000xxxEEEE
// where commonest cases for xxx encoding in-to-out pointer
// flow are 000, 001, 010, 011 and EEEE is computed Esc bits.
// Note width of xxx depends on value of constant
// bitsPerOutputInTag -- expect 2 or 3, so in practice the
// tag cache array is 64 or 128 long. Some entries will
// never be populated.
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string

// mktag returns the string representation for an escape analysis tag.
func mktag(mask int) string {
	switch mask & EscMask {
	case EscNone, EscReturn:
	default:
		Fatalf("escape mktag")
	}

	if mask < len(tags) && tags[mask] != "" {
		return tags[mask]
	}

	s := fmt.Sprintf("esc:0x%x", mask)
	if mask < len(tags) {
		tags[mask] = s
	}
	return s
}

// parsetag decodes an escape analysis tag and returns the esc value.
func parsetag(note string) uint16 {
	if !strings.HasPrefix(note, "esc:") {
		return EscUnknown
	}
	n, _ := strconv.ParseInt(note[4:], 0, 0)
	em := uint16(n)
	if em == 0 {
		return EscNone
	}
	return em
}

// describeEscape returns a string describing the escape tag.
// The result is either one of {EscUnknown, EscNone, EscHeap} which all have no further annotation
// or a description of parameter flow, which takes the form of an optional "contentToHeap"
// indicating that the content of this parameter is leaked to the heap, followed by a sequence
// of level encodings separated by spaces, one for each parameter, where _ means no flow,
// = means direct flow, and N asterisks (*) encodes content (obtained by indirection) flow.
// e.g., "contentToHeap _ =" means that a parameter's content (one or more dereferences)
// escapes to the heap, the parameter does not leak to the first output, but does leak directly
// to the second output (and if there are more than two outputs, there is no flow to those.)
func describeEscape(em uint16) string {
	var s string
	switch em & EscMask {
	case EscUnknown:
		s = "EscUnknown"
	case EscNone:
		s = "EscNone"
	case EscHeap:
		s = "EscHeap"
	case EscReturn:
		s = "EscReturn"
	}
	if em&EscContentEscapes != 0 {
		if s != "" {
			s += " "
		}
		s += "contentToHeap"
	}
	for em >>= EscReturnBits; em != 0; em = em >> bitsPerOutputInTag {
		// See encoding description above
		if s != "" {
			s += " "
		}
		switch embits := em & bitsMaskForTag; embits {
		case 0:
			s += "_"
		case 1:
			s += "="
		default:
			for i := uint16(0); i < embits-1; i++ {
				s += "*"
			}
		}
	}
	return s
}

// escassignfromtag models the input-to-output assignment flow of one of a function
// call's arguments, where the flow is encoded in "note".
func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) uint16 {
	em := parsetag(note)
	if src.Op == OLITERAL {
		return em
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::assignfromtag:: src=%S, em=%s\n",
			linestr(lineno), src, describeEscape(em))
	}

	if em == EscUnknown {
		e.escassignSinkWhyWhere(src, src, "passed to call[argument escapes]", call)
		return em
	}

	if em == EscNone {
		return em
	}

	// If content inside parameter (reached via indirection)
	// escapes to heap, mark as such.
	if em&EscContentEscapes != 0 {
		e.escassign(&e.theSink, e.addDereference(src), e.stepAssignWhere(src, src, "passed to call[argument content escapes]", call))
	}

	em0 := em
	dstsi := 0
	for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em = em >> bitsPerOutputInTag {
		// Prefer the lowest-level path to the reference (for escape purposes).
		// Two-bit encoding (for example; 1, 3, and 4 bits are other options):
		// 01 = 0-level
		// 10 = 1-level, (content escapes),
		// 11 = 2-level, (content of content escapes),
		embits := em & bitsMaskForTag
		if embits > 0 {
			n := src
			for i := uint16(0); i < embits-1; i++ {
				n = e.addDereference(n) // encode level>0 as indirections
			}
			e.escassign(dsts.Index(dstsi), n, e.stepAssignWhere(dsts.Index(dstsi), src, "passed-to-and-returned-from-call", call))
		}
		dstsi++
	}
	// If there are too many outputs to fit in the tag,
	// that is handled at the encoding end as EscHeap,
	// so there is no need to check here.

	if em != 0 && dstsi >= dsts.Len() {
		Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
	}
	return em0
}

func (e *EscState) escassignDereference(dst *Node, src *Node, step *EscStep) {
	if src.Op == OLITERAL {
		return
	}
	e.escassign(dst, e.addDereference(src), step)
}

// addDereference constructs a suitable OIND note applied to src.
// Because this is for purposes of escape accounting, not execution,
// some semantically dubious node combinations are (currently) possible.
func (e *EscState) addDereference(n *Node) *Node {
	ind := nod(OIND, n, nil)
	e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
	ind.Pos = n.Pos
	t := n.Type
	if t.IsKind(types.Tptr) {
		// This should model our own sloppy use of OIND to encode
		// decreasing levels of indirection; i.e., "indirecting" an array
		// might yield the type of an element. To be enhanced...
		t = t.Elem()
	}
	ind.Type = t
	return ind
}

// escNoteOutputParamFlow encodes maxEncodedLevel/.../1/0-level flow to the vargen'th parameter.
// Levels greater than maxEncodedLevel are replaced with maxEncodedLevel.
// If the encoding cannot describe the modified input level and output number, then EscHeap is returned.
func escNoteOutputParamFlow(e uint16, vargen int32, level Level) uint16 {
	// Flow+level is encoded in two bits.
	// 00 = not flow, xx = level+1 for 0 <= level <= maxEncodedLevel
	// 16 bits for Esc allows 6x2bits or 4x3bits or 3x4bits if additional information would be useful.
	if level.int() <= 0 && level.guaranteedDereference() > 0 {
		return escMax(e|EscContentEscapes, EscNone) // At least one deref, thus only content.
	}
	if level.int() < 0 {
		return EscHeap
	}
	if level.int() > maxEncodedLevel {
		// Cannot encode larger values than maxEncodedLevel.
		level = levelFrom(maxEncodedLevel)
	}
	encoded := uint16(level.int() + 1)

	shift := uint(bitsPerOutputInTag*(vargen-1) + EscReturnBits)
	old := (e >> shift) & bitsMaskForTag
	if old == 0 || encoded != 0 && encoded < old {
		old = encoded
	}

	encodedFlow := old << shift
	if (encodedFlow>>shift)&bitsMaskForTag != old {
		// Encoding failure defaults to heap.
		return EscHeap
	}

	return (e &^ (bitsMaskForTag << shift)) | encodedFlow
}

func (e *EscState) initEscRetval(call *Node, fntype *types.Type) {
	cE := e.nodeEscState(call)
	cE.Retval.Set(nil) // Suspect this is not nil for indirect calls.
	for i, f := range fntype.Results().Fields().Slice() {
		buf := fmt.Sprintf(".out%d", i)
		ret := newname(lookup(buf))
		ret.SetAddable(false) // TODO(mdempsky): Seems suspicious.
		ret.Type = f.Type
		ret.SetClass(PAUTO)
		ret.Name.Curfn = Curfn
		e.nodeEscState(ret).Loopdepth = e.loopdepth
		ret.Name.SetUsed(true)
		ret.Pos = call.Pos
		cE.Retval.Append(ret)
	}
}

// This is a bit messier than fortunate, pulled out of esc's big
// switch for clarity. We either have the paramnodes, which may be
// connected to other things through flows or we have the parameter type
// nodes, which may be marked "noescape". Navigating the ast is slightly
// different for methods vs plain functions and for imported vs
// this-package
func (e *EscState) esccall(call *Node, parent *Node) {
	var fntype *types.Type
	var indirect bool
	var fn *Node
	switch call.Op {
	default:
		Fatalf("esccall")

	case OCALLFUNC:
		fn = call.Left
		fntype = fn.Type
		indirect = fn.Op != ONAME || fn.Class() != PFUNC

	case OCALLMETH:
		fn = asNode(call.Left.Sym.Def)
		if fn != nil {
			fntype = fn.Type
		} else {
			fntype = call.Left.Type
		}

	case OCALLINTER:
		fntype = call.Left.Type
		indirect = true
	}

	argList := call.List
	if argList.Len() == 1 {
		arg := argList.First()
		if arg.Type.IsFuncArgStruct() { // f(g())
			argList = e.nodeEscState(arg).Retval
		}
	}

	args := argList.Slice()

	if indirect {
		// We know nothing!
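		// (Illustrative only: for a call through a function variable or an
		// interface method, say fn(x), the callee's body is unknown here, so x
		// must be assumed to reach the heap.)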
		// Leak all the parameters
		for _, arg := range args {
			e.escassignSinkWhy(call, arg, "parameter to indirect call")
			if Debug['m'] > 3 {
				fmt.Printf("%v::esccall:: indirect call <- %S, untracked\n", linestr(lineno), arg)
			}
		}
		// Set up bogus outputs
		e.initEscRetval(call, fntype)
		// If there is a receiver, it also leaks to heap.
		if call.Op != OCALLFUNC {
			rf := fntype.Recv()
			r := call.Left.Left
			if types.Haspointers(rf.Type) {
				e.escassignSinkWhy(call, r, "receiver in indirect call")
			}
		} else { // indirect and OCALLFUNC = could be captured variables, too. (#14409)
			rets := e.nodeEscState(call).Retval.Slice()
			for _, ret := range rets {
				e.escassignDereference(ret, fn, e.stepAssignWhere(ret, fn, "captured by called closure", call))
			}
		}
		return
	}

	cE := e.nodeEscState(call)
	if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC &&
		fn.Name.Defn != nil && fn.Name.Defn.Nbody.Len() != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged {
		if Debug['m'] > 3 {
			fmt.Printf("%v::esccall:: %S in recursive group\n", linestr(lineno), call)
		}

		// function in same mutually recursive group. Incorporate into flow graph.
		// print("esc local fn: %N\n", fn.Func.Ntype);
		if fn.Name.Defn.Esc == EscFuncUnknown || cE.Retval.Len() != 0 {
			Fatalf("graph inconsistency")
		}

		sawRcvr := false
		for _, n := range fn.Name.Defn.Func.Dcl {
			switch n.Class() {
			case PPARAM:
				if call.Op != OCALLFUNC && !sawRcvr {
					e.escassignWhyWhere(n, call.Left.Left, "call receiver", call)
					sawRcvr = true
					continue
				}
				if len(args) == 0 {
					continue
				}
				arg := args[0]
				if n.Isddd() && !call.Isddd() {
					// Introduce ODDDARG node to represent ... allocation.
					arg = nod(ODDDARG, nil, nil)
					arr := types.NewArray(n.Type.Elem(), int64(len(args)))
					arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
					arg.Pos = call.Pos
					e.track(arg)
					call.Right = arg
				}
				e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help.
				if arg == args[0] {
					args = args[1:]
					continue
				}
				// "..." arguments are untracked
				for _, a := range args {
					if Debug['m'] > 3 {
						fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
					}
					e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
				}
				// No more PPARAM processing, but keep
				// going for PPARAMOUT.
				args = nil

			case PPARAMOUT:
				cE.Retval.Append(n)
			}
		}

		return
	}

	// Imported or completely analyzed function. Use the escape tags.
	if cE.Retval.Len() != 0 {
		Fatalf("esc already decorated call %+v\n", call)
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::esccall:: %S not recursive\n", linestr(lineno), call)
	}

	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
	e.initEscRetval(call, fntype)

	// print("esc analyzed fn: %#N (%+T) returning (%+H)\n", fn, fntype, e.nodeEscState(call).Retval);

	// Receiver.
	if call.Op != OCALLFUNC {
		rf := fntype.Recv()
		r := call.Left.Left
		if types.Haspointers(rf.Type) {
			e.escassignfromtag(rf.Note, cE.Retval, r, call)
		}
	}

	for i, param := range fntype.Params().FieldSlice() {
		note := param.Note
		var arg *Node
		if param.Isddd() && !call.Isddd() {
			rest := args[i:]
			if len(rest) == 0 {
				break
			}

			// Introduce ODDDARG node to represent ... allocation.
			arg = nod(ODDDARG, nil, nil)
			arg.Pos = call.Pos
			arr := types.NewArray(param.Type.Elem(), int64(len(rest)))
			arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
			e.track(arg)
			call.Right = arg

			// Store arguments into slice for ... arg.
			for _, a := range rest {
				if Debug['m'] > 3 {
					fmt.Printf("%v::esccall:: ... <- %S\n", linestr(lineno), a)
				}
				if note == uintptrEscapesTag {
					e.escassignSinkWhyWhere(arg, a, "arg to uintptrescapes ...", call)
				} else {
					e.escassignWhyWhere(arg, a, "arg to ...", call)
				}
			}
		} else {
			arg = args[i]
			if note == uintptrEscapesTag {
				e.escassignSinkWhy(arg, arg, "escaping uintptr")
			}
		}

		if types.Haspointers(param.Type) && e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OPROC {
			a := arg
			for a.Op == OCONVNOP {
				a = a.Left
			}
			switch a.Op {
			// The callee has already been analyzed, so its arguments have esc tags.
			// The argument is marked as not escaping at all.
			// Record that fact so that any temporary used for
			// synthesizing this expression can be reclaimed when
			// the function returns.
			// This 'noescape' is even stronger than the usual esc == EscNone.
			// arg.Esc == EscNone means that arg does not escape the current function.
			// arg.SetNoescape(true) here means that arg does not escape this statement
			// in the current function.
			case OCALLPART, OCLOSURE, ODDDARG, OARRAYLIT, OSLICELIT, OPTRLIT, OSTRUCTLIT:
				a.SetNoescape(true)
			}
		}
	}
}

// escflows records the link src->dst in dst, throwing out some quick wins,
// and also ensuring that dst is noted as a flow destination.
func (e *EscState) escflows(dst, src *Node, why *EscStep) {
	if dst == nil || src == nil || dst == src {
		return
	}

	// Don't bother building a graph for scalars.
	if src.Type != nil && !types.Haspointers(src.Type) && !isReflectHeaderDataField(src) {
		if Debug['m'] > 3 {
			fmt.Printf("%v::NOT flows:: %S <- %S\n", linestr(lineno), dst, src)
		}
		return
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::flows:: %S <- %S\n", linestr(lineno), dst, src)
	}

	dstE := e.nodeEscState(dst)
	if len(dstE.Flowsrc) == 0 {
		e.dsts = append(e.dsts, dst)
		e.dstcount++
	}

	e.edgecount++

	if why == nil {
		dstE.Flowsrc = append(dstE.Flowsrc, EscStep{src: src})
	} else {
		starwhy := *why
		starwhy.src = src // TODO: need to reconcile this w/ needs of explanations.
		dstE.Flowsrc = append(dstE.Flowsrc, starwhy)
	}
}

// Whenever we hit a reference node, the level goes up by one, and whenever
// we hit an OADDR, the level goes down by one. As long as we're on a level > 0,
// finding an OADDR just means we're following the upstream of a dereference,
// so this address doesn't leak (yet).
// If level == 0, it means the /value/ of this node can reach the root of this flood,
// so if this node is an OADDR, its argument should be marked as escaping iff
// its currfn/e.loopdepth are different from the flood's root.
// Once an object has been moved to the heap, all of its upstream should be considered
// escaping to the global scope.
func (e *EscState) escflood(dst *Node) {
	switch dst.Op {
	case ONAME, OCLOSURE:
	default:
		return
	}

	dstE := e.nodeEscState(dst)
	if Debug['m'] > 2 {
		fmt.Printf("\nescflood:%d: dst %S scope:%v[%d]\n", e.walkgen, dst, e.curfnSym(dst), dstE.Loopdepth)
	}

	for i := range dstE.Flowsrc {
		e.walkgen++
		s := &dstE.Flowsrc[i]
		s.parent = nil
		e.escwalk(levelFrom(0), dst, s.src, s)
	}
}

// funcOutputAndInput reports whether dst and src correspond to output and input parameters of the same function.
func funcOutputAndInput(dst, src *Node) bool {
	// Note if dst is marked as escaping, then "returned" is too weak.
	return dst.Op == ONAME && dst.Class() == PPARAMOUT &&
		src.Op == ONAME && src.Class() == PPARAM && src.Name.Curfn == dst.Name.Curfn
}

func (es *EscStep) describe(src *Node) {
	if Debug['m'] < 2 {
		return
	}
	step0 := es
	for step := step0; step != nil && !step.busy; step = step.parent {
		// TODO: We get cycles. Trigger is i = &i (where var i interface{})
		step.busy = true
		// The trail is a little odd because of how the
		// graph is constructed. The link to the current
		// Node is parent.src unless parent is nil in which
		// case it is step.dst.
		nextDest := step.parent
		dst := step.dst
		where := step.where
		if nextDest != nil {
			dst = nextDest.src
		}
		if where == nil {
			where = dst
		}
		Warnl(src.Pos, "\tfrom %v (%s) at %s", dst, step.why, where.Line())
	}
	for step := step0; step != nil && step.busy; step = step.parent {
		step.busy = false
	}
}

const NOTALOOPDEPTH = -1

func (e *EscState) escwalk(level Level, dst *Node, src *Node, step *EscStep) {
	e.escwalkBody(level, dst, src, step, NOTALOOPDEPTH)
}

func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) {
	if src.Op == OLITERAL {
		return
	}
	srcE := e.nodeEscState(src)
	if srcE.Walkgen == e.walkgen {
		// Esclevels are vectors, do not compare as integers,
		// and must use "min" of old and new to guarantee
		// convergence.
		level = level.min(srcE.Level)
		if level == srcE.Level {
			// Have we been here already with an extraloopdepth,
			// or does the provided extraloopdepth offer no improvement
			// over what's already been seen?
			if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth {
				return
			}
			srcE.Maxextraloopdepth = extraloopdepth
		}
	} else { // srcE.Walkgen < e.walkgen -- first time, reset this.

func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) {
	if src.Op == OLITERAL {
		return
	}
	srcE := e.nodeEscState(src)
	if srcE.Walkgen == e.walkgen {
		// Esclevels are vectors, do not compare as integers,
		// and must use "min" of old and new to guarantee
		// convergence.
		level = level.min(srcE.Level)
		if level == srcE.Level {
			// Have we been here already with an extraloopdepth,
			// or is the extraloopdepth provided no improvement on
			// what's already been seen?
			if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth {
				return
			}
			srcE.Maxextraloopdepth = extraloopdepth
		}
	} else { // srcE.Walkgen < e.walkgen -- first time, reset this.
		srcE.Maxextraloopdepth = NOTALOOPDEPTH
	}

	srcE.Walkgen = e.walkgen
	srcE.Level = level
	modSrcLoopdepth := srcE.Loopdepth

	if extraloopdepth > modSrcLoopdepth {
		modSrcLoopdepth = extraloopdepth
	}

	if Debug['m'] > 2 {
		fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %S(%0j) scope:%v[%d] extraloopdepth=%v\n",
			level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", src.Op, src, src, e.curfnSym(src), srcE.Loopdepth, extraloopdepth)
	}

	e.pdepth++

	// Input parameter flowing to output parameter?
	var leaks bool
	var osrcesc uint16 // used to prevent duplicate error messages

	dstE := e.nodeEscState(dst)
	if funcOutputAndInput(dst, src) && src.Esc&EscMask < EscHeap && dst.Esc != EscHeap {
		// This case handles:
		// 1. return in
		// 2. return &in
		// 3. tmp := in; return &tmp
		// 4. return *in
		if Debug['m'] != 0 {
			if Debug['m'] <= 2 {
				Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level.int())
				step.describe(src)
			} else {
				Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level)
			}
		}
		if src.Esc&EscMask != EscReturn {
			src.Esc = EscReturn | src.Esc&EscContentEscapes
		}
		src.Esc = escNoteOutputParamFlow(src.Esc, dst.Name.Vargen, level)
		goto recurse
	}

	// If parameter content escapes to heap, set EscContentEscapes
	// Note minor confusion around escape from pointer-to-struct vs escape from struct
	if dst.Esc == EscHeap &&
		src.Op == ONAME && src.Class() == PPARAM && src.Esc&EscMask < EscHeap &&
		level.int() > 0 {
		src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
		if Debug['m'] != 0 {
			Warnl(src.Pos, "mark escaped content: %S", src)
			step.describe(src)
		}
	}

	leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Loopdepth < modSrcLoopdepth
	leaks = leaks || level.int() <= 0 && dst.Esc&EscMask == EscHeap

	osrcesc = src.Esc
	switch src.Op {
	case ONAME:
		if src.Class() == PPARAM && (leaks || dstE.Loopdepth < 0) && src.Esc&EscMask < EscHeap {
			if level.guaranteedDereference() > 0 {
				src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
				if Debug['m'] != 0 {
					if Debug['m'] <= 2 {
						if osrcesc != src.Esc {
							Warnl(src.Pos, "leaking param content: %S", src)
							step.describe(src)
						}
					} else {
						Warnl(src.Pos, "leaking param content: %S level=%v dst.eld=%v src.eld=%v dst=%S",
							src, level, dstE.Loopdepth, modSrcLoopdepth, dst)
					}
				}
			} else {
				src.Esc = EscHeap
				if Debug['m'] != 0 {
					if Debug['m'] <= 2 {
						Warnl(src.Pos, "leaking param: %S", src)
						step.describe(src)
					} else {
						Warnl(src.Pos, "leaking param: %S level=%v dst.eld=%v src.eld=%v dst=%S",
							src, level, dstE.Loopdepth, modSrcLoopdepth, dst)
					}
				}
			}
		}
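
		// For example (informal; wording is roughly what -m prints):
		//
		//	var sink *int
		//	func f(p *int)  { sink = p }   // "leaking param: p"
		//
		//	var sink2 *int
		//	func g(p **int) { sink2 = *p } // "leaking param content: p"
		//
		// In f the parameter itself reaches the heap, so it becomes EscHeap;
		// in g only what p points at leaks, so p gets EscContentEscapes.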

		// Treat a captured closure variable as equivalent to the
		// original variable.
		if src.IsClosureVar() {
			if leaks && Debug['m'] != 0 {
				Warnl(src.Pos, "leaking closure reference %S", src)
				step.describe(src)
			}
			e.escwalk(level, dst, src.Name.Defn, e.stepWalk(dst, src.Name.Defn, "closure-var", step))
		}

	case OPTRLIT, OADDR:
		why := "pointer literal"
		if src.Op == OADDR {
			why = "address-of"
		}
		if leaks {
			src.Esc = EscHeap
			if Debug['m'] != 0 && osrcesc != src.Esc {
				p := src
				if p.Left.Op == OCLOSURE {
					p = p.Left // merely to satisfy error messages in tests
				}
				if Debug['m'] > 2 {
					Warnl(src.Pos, "%S escapes to heap, level=%v, dst=%v dst.eld=%v, src.eld=%v",
						p, level, dst, dstE.Loopdepth, modSrcLoopdepth)
				} else {
					Warnl(src.Pos, "%S escapes to heap", p)
					step.describe(src)
				}
			}
			addrescapes(src.Left)
			e.escwalkBody(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step), modSrcLoopdepth)
			extraloopdepth = modSrcLoopdepth // passes to recursive case, seems likely a no-op
		} else {
			e.escwalk(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step))
		}

	case OAPPEND:
		e.escwalk(level, dst, src.List.First(), e.stepWalk(dst, src.List.First(), "append-first-arg", step))

	case ODDDARG:
		if leaks {
			src.Esc = EscHeap
			if Debug['m'] != 0 && osrcesc != src.Esc {
				Warnl(src.Pos, "%S escapes to heap", src)
				step.describe(src)
			}
			extraloopdepth = modSrcLoopdepth
		}
		// similar to a slice arraylit and its args.
		level = level.dec()

	case OSLICELIT:
		for _, elt := range src.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.escwalk(level.dec(), dst, elt, e.stepWalk(dst, elt, "slice-literal-element", step))
		}

		fallthrough

	case OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		OARRAYRUNESTR,
		OARRAYBYTESTR,
		OSTRARRAYRUNE,
		OSTRARRAYBYTE,
		OADDSTR,
		OMAPLIT,
		ONEW,
		OCLOSURE,
		OCALLPART,
		ORUNESTR,
		OCONVIFACE:
		if leaks {
			src.Esc = EscHeap
			if Debug['m'] != 0 && osrcesc != src.Esc {
				Warnl(src.Pos, "%S escapes to heap", src)
				step.describe(src)
			}
			extraloopdepth = modSrcLoopdepth
		}

	case ODOT,
		ODOTTYPE:
		e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "dot", step))

	case
		OSLICE,
		OSLICEARR,
		OSLICE3,
		OSLICE3ARR,
		OSLICESTR:
		e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "slice", step))

	case OINDEX:
		if src.Left.Type.IsArray() {
			e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "fixed-array-index-of", step))
			break
		}
		fallthrough

	case ODOTPTR:
		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "dot of pointer", step))
	case OINDEXMAP:
		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "map index", step))
	case OIND:
		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "indirection", step))
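
		// The dereference cases above (ODOTPTR, OINDEXMAP, OIND) raise the
		// level, canceling an earlier OADDR. For example (informal; type and
		// field names are hypothetical), in
		//
		//	type T struct{ x int }
		//	func f(p *T) *int { return &p.x }
		//
		// the walk sees OADDR (level goes down) and then ODOTPTR (level back
		// to 0), so p is reported as leaking to the result rather than
		// escaping to the heap.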

	// In this case a link went directly to a call, but should really go
	// to the dummy .outN outputs that were created for the call that
	// themselves link to the inputs with levels adjusted.
	// See e.g. #10466.
	// This can only happen with functions returning a single result.
	case OCALLMETH, OCALLFUNC, OCALLINTER:
		if srcE.Retval.Len() != 0 {
			if Debug['m'] > 2 {
				fmt.Printf("%v:[%d] dst %S escwalk replace src: %S with %S\n",
					linestr(lineno), e.loopdepth,
					dst, src, srcE.Retval.First())
			}
			src = srcE.Retval.First()
			srcE = e.nodeEscState(src)
		}
	}

recurse:
	level = level.copy()

	for i := range srcE.Flowsrc {
		s := &srcE.Flowsrc[i]
		s.parent = step
		e.escwalkBody(level, dst, s.src, s, extraloopdepth)
		s.parent = nil
	}

	e.pdepth--
}

// addrescapes tags node n as having had its address taken
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *Node) {
	switch n.Op {
	default:
		// Unexpected Op, probably due to a previous type error. Ignore.

	case OIND, ODOTPTR:
		// Nothing to do.

	case ONAME:
		if n == nodfp {
			break
		}

		// If this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
		// On PPARAM it means something different.
		if n.Class() == PAUTO && n.Esc == EscNever {
			break
		}

		// If a closure reference escapes, mark the outer variable as escaping.
		if n.IsClosureVar() {
			addrescapes(n.Name.Defn)
			break
		}

		if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO {
			break
		}

		// This is a plain parameter or local variable that needs to move to the heap,
		// but possibly for the function outside the one we're compiling.
		// That is, if we have:
		//
		//	func f(x int) {
		//		func() {
		//			global = &x
		//		}
		//	}
		//
		// then we're analyzing the inner closure but we need to move x to the
		// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
		oldfn := Curfn
		Curfn = n.Name.Curfn
		if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
			Curfn = Curfn.Func.Closure
		}
		ln := lineno
		lineno = Curfn.Pos
		moveToHeap(n)
		Curfn = oldfn
		lineno = ln

	// ODOTPTR has already been introduced,
	// so these are the non-pointer ODOT and OINDEX.
	// In &x[0], if x is a slice, then x does not
	// escape--the pointer inside x does, but that
	// is always a heap pointer anyway.
	case ODOT, OINDEX, OPAREN, OCONVNOP:
		if !n.Left.Type.IsSlice() {
			addrescapes(n.Left)
		}
	}
}
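
// For example (an informal sketch; the diagnostic text is roughly what -m
// prints), given
//
//	func f() *int {
//		x := 0
//		return &x
//	}
//
// escwalk reaches the OADDR node from the result, calls addrescapes(x),
// and moveToHeap below turns x into a PAUTOHEAP variable ("moved to heap: x");
// later compiler passes emit the actual allocation and access x through the
// &x heapaddr temporary.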

// moveToHeap records the parameter or local variable n as moved to the heap.
func moveToHeap(n *Node) {
	if Debug['r'] != 0 {
		Dump("MOVE", n)
	}
	if compiling_runtime {
		yyerror("%v escapes to heap, not allowed in runtime.", n)
	}
	if n.Class() == PAUTOHEAP {
		Dump("n", n)
		Fatalf("double move to heap")
	}

	// Allocate a local stack variable to hold the pointer to the heap copy.
	// temp will add it to the function declaration list automatically.
	heapaddr := temp(types.NewPtr(n.Type))
	heapaddr.Sym = lookup("&" + n.Sym.Name)
	heapaddr.Orig.Sym = heapaddr.Sym
	heapaddr.Pos = n.Pos

	// Unset AutoTemp to persist the &foo variable name through SSA to
	// liveness analysis.
	// TODO(mdempsky/drchase): Cleaner solution?
	heapaddr.Name.SetAutoTemp(false)

	// Parameters have a local stack copy used at function start/end
	// in addition to the copy in the heap that may live longer than
	// the function.
	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
		if n.Xoffset == BADWIDTH {
			Fatalf("addrescapes before param assignment")
		}

		// We rewrite n below to be a heap variable (indirection of heapaddr).
		// Preserve a copy so we can still write code referring to the original,
		// and substitute that copy into the function declaration list
		// so that analyses of the local (on-stack) variables use it.
		stackcopy := newname(n.Sym)
		stackcopy.SetAddable(false)
		stackcopy.Type = n.Type
		stackcopy.Xoffset = n.Xoffset
		stackcopy.SetClass(n.Class())
		stackcopy.Name.Param.Heapaddr = heapaddr
		if n.Class() == PPARAMOUT {
			// Make sure the pointer to the heap copy is kept live throughout the function.
			// The function could panic at any point, and then a defer could recover.
			// Thus, we need the pointer to the heap copy always available so the
			// post-deferreturn code can copy the return value back to the stack.
			// See issue 16095.
			heapaddr.SetIsOutputParamHeapAddr(true)
		}
		n.Name.Param.Stackcopy = stackcopy

		// Substitute the stackcopy into the function variable list so that
		// liveness and other analyses use the underlying stack slot
		// and not the now-pseudo-variable n.
		found := false
		for i, d := range Curfn.Func.Dcl {
			if d == n {
				Curfn.Func.Dcl[i] = stackcopy
				found = true
				break
			}
			// Parameters are before locals, so can stop early.
			// This limits the search even in functions with many local variables.
			if d.Class() == PAUTO {
				break
			}
		}
		if !found {
			Fatalf("cannot find %v in local variable list", n)
		}
		Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
	}

	// Modify n in place so that uses of n now mean indirection of the heapaddr.
	n.SetClass(PAUTOHEAP)
	n.Xoffset = 0
	n.Name.Param.Heapaddr = heapaddr
	n.Esc = EscHeap
	if Debug['m'] != 0 {
		fmt.Printf("%v: moved to heap: %v\n", n.Line(), n)
	}
}

// This special tag is applied to uintptr variables
// that we believe may hold unsafe.Pointers for
// calls into assembly functions.
// It is logically a constant, but using a var
// lets us take the address below to get a *string.
var unsafeUintptrTag = "unsafe-uintptr"

// This special tag is applied to uintptr parameters of functions
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"

func (e *EscState) esctag(fn *Node) {
	fn.Esc = EscFuncTagged

	name := func(s *types.Sym, narg int) string {
		if s != nil {
			return s.Name
		}
		return fmt.Sprintf("arg#%d", narg)
	}

	// External functions are assumed unsafe,
	// unless //go:noescape is given before the declaration.
	if fn.Nbody.Len() == 0 {
		if fn.Noescape() {
			for _, f := range fn.Type.Params().Fields().Slice() {
				if types.Haspointers(f.Type) {
					f.Note = mktag(EscNone)
				}
			}
		}
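
		// For example (illustrative; the declaration below is hypothetical),
		// an assembly-backed function declared as
		//
		//	//go:noescape
		//	func indexByte(b []byte, c byte) int
		//
		// gets EscNone tags on its pointer-carrying parameters, so callers
		// may keep the backing array on the stack.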

		// Assume that uintptr arguments must be held live across the call.
		// This is most important for syscall.Syscall.
		// See golang.org/issue/13372.
		// This really doesn't have much to do with escape analysis per se,
		// but we are reusing the ability to annotate an individual function
		// argument and pass those annotations along to importing code.
		narg := 0
		for _, f := range fn.Type.Params().Fields().Slice() {
			narg++
			if f.Type.Etype == TUINTPTR {
				if Debug['m'] != 0 {
					Warnl(fn.Pos, "%v assuming %v is unsafe uintptr", funcSym(fn), name(f.Sym, narg))
				}
				f.Note = unsafeUintptrTag
			}
		}

		return
	}

	if fn.Func.Pragma&UintptrEscapes != 0 {
		narg := 0
		for _, f := range fn.Type.Params().Fields().Slice() {
			narg++
			if f.Type.Etype == TUINTPTR {
				if Debug['m'] != 0 {
					Warnl(fn.Pos, "%v marking %v as escaping uintptr", funcSym(fn), name(f.Sym, narg))
				}
				f.Note = uintptrEscapesTag
			}

			if f.Isddd() && f.Type.Elem().Etype == TUINTPTR {
				// final argument is ...uintptr.
				if Debug['m'] != 0 {
					Warnl(fn.Pos, "%v marking %v as escaping ...uintptr", funcSym(fn), name(f.Sym, narg))
				}
				f.Note = uintptrEscapesTag
			}
		}
	}

	for _, ln := range fn.Func.Dcl {
		if ln.Op != ONAME {
			continue
		}

		switch ln.Esc & EscMask {
		case EscNone, // not touched by escflood
			EscReturn:
			if types.Haspointers(ln.Type) { // don't bother tagging for scalars
				if ln.Name.Param.Field.Note != uintptrEscapesTag {
					ln.Name.Param.Field.Note = mktag(int(ln.Esc))
				}
			}

		case EscHeap: // touched by escflood, moved to heap
		}
	}

	// Unnamed parameters are unused and therefore do not escape.
	// (Unnamed parameters are not in the Dcl list in the loop above
	// so we need to mark them separately.)
	for _, f := range fn.Type.Params().Fields().Slice() {
		if f.Sym == nil || f.Sym.IsBlank() {
			f.Note = mktag(EscNone)
		}
	}
}
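
// Example of the //go:uintptrescapes pragma handled in esctag above (the
// function shown is hypothetical):
//
//	//go:uintptrescapes
//	func callImpl(fn, arg uintptr) uintptr
//
// Every uintptr parameter (and a trailing ...uintptr) is tagged with
// uintptrEscapesTag; esccall treats such arguments as assigned to the sink,
// so values whose pointers were converted to uintptr at the call site are
// forced to the heap and stay live for the duration of the call.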