github.com/hikaru7719/go@v0.0.0-20181025140707-c8b2ac68906a/src/cmd/compile/internal/gc/esc.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"fmt"
	"strconv"
	"strings"
)

// Run analysis on minimal sets of mutually recursive functions
// or single non-recursive functions, bottom up.
//
// Finding these sets is finding strongly connected components
// by reverse topological order in the static call graph.
// The algorithm (known as Tarjan's algorithm) for doing that is taken from
// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
//
// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
// root of a connected component. Refusing to use it as a root
// forces it into the component of the function in which it appears.
// This is more convenient for escape analysis.
//
// Second, each function becomes two virtual nodes in the graph,
// with numbers n and n+1. We record the function's node number as n
// but search from node n+1. If the search tells us that the component
// number (min) is n+1, we know that this is a trivial component: one function
// plus its closures. If the search tells us that the component number is
// n, then there was a path from node n+1 back to node n, meaning that
// the function set is mutually recursive. The escape analysis can be
// more precise when analyzing a single non-recursive function than
// when analyzing a set of mutually recursive functions.

type bottomUpVisitor struct {
	analyze  func([]*Node, bool)
	visitgen uint32
	nodeID   map[*Node]uint32
	stack    []*Node
}

// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
// on the list or functions that have been passed in previous invocations of
// analyze. Closures appear in the same list as their outer functions.
// The lists are as short as possible while preserving those requirements.
// (In a typical program, many invocations of analyze will be passed just
// a single function.) The boolean argument 'recursive' passed to analyze
// specifies whether the functions on the list are mutually recursive.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
	var v bottomUpVisitor
	v.analyze = analyze
	v.nodeID = make(map[*Node]uint32)
	for _, n := range list {
		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
			v.visit(n)
		}
	}
}
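
// For example (illustrative sketch, not part of the compiler): given
//
//	func f() { g() }
//	func g() { g(); h() }
//	func h() {}
//
// visitBottomUp calls analyze three times, from the bottom of the call
// graph up: analyze([h], false), then analyze([g], true) because the
// search started at g's virtual node n+1 finds its way back to node n,
// and finally analyze([f], false).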

func (v *bottomUpVisitor) visit(n *Node) uint32 {
	if id := v.nodeID[n]; id > 0 {
		// already visited
		return id
	}

	v.visitgen++
	id := v.visitgen
	v.nodeID[n] = id
	v.visitgen++
	min := v.visitgen

	v.stack = append(v.stack, n)
	min = v.visitcodelist(n.Nbody, min)
	if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
		// This node is the root of a strongly connected component.

		// The original min passed to visitcodelist was v.nodeID[n]+1.
		// If visitcodelist found its way back to v.nodeID[n], then this
		// block is a set of mutually recursive functions.
		// Otherwise it's just a lone function that does not recurse.
		recursive := min == id

		// Remove connected component from stack.
		// Mark walkgen so that future visits return a large number
		// so as not to affect the caller's min.

		var i int
		for i = len(v.stack) - 1; i >= 0; i-- {
			x := v.stack[i]
			if x == n {
				break
			}
			v.nodeID[x] = ^uint32(0)
		}
		v.nodeID[n] = ^uint32(0)
		block := v.stack[i:]
		// Run escape analysis on this set of functions.
		v.stack = v.stack[:i]
		v.analyze(block, recursive)
	}

	return min
}

func (v *bottomUpVisitor) visitcodelist(l Nodes, min uint32) uint32 {
	for _, n := range l.Slice() {
		min = v.visitcode(n, min)
	}
	return min
}

func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
	if n == nil {
		return min
	}

	min = v.visitcodelist(n.Ninit, min)
	min = v.visitcode(n.Left, min)
	min = v.visitcode(n.Right, min)
	min = v.visitcodelist(n.List, min)
	min = v.visitcodelist(n.Nbody, min)
	min = v.visitcodelist(n.Rlist, min)

	switch n.Op {
	case OCALLFUNC, OCALLMETH:
		fn := asNode(n.Left.Type.Nname())
		if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
			m := v.visit(fn.Name.Defn)
			if m < min {
				min = m
			}
		}

	case OCLOSURE:
		m := v.visit(n.Func.Closure)
		if m < min {
			min = m
		}
	}

	return min
}

// Escape analysis.

// An escape analysis pass for a set of functions. The
// analysis assumes that closures and the functions in which
// they appear are analyzed together, so that the aliasing
// between their variables can be modeled more precisely.
//
// First escfunc, esc and escassign recurse over the ast of
// each function to dig out flow(dst,src) edges between any
// pointer-containing nodes and store those edges in
// e.nodeEscState(dst).Flowsrc. For values assigned to a
// variable in an outer scope or used as a return value,
// they store a flow(theSink, src) edge to a fake node 'the
// Sink'. For variables referenced in closures, an edge
// flow(closure, &var) is recorded and the flow of a closure
// itself to an outer scope is tracked the same way as other
// variables.
//
// Then escflood walks the graph in destination-to-source
// order, starting at theSink, propagating a computed
// "escape level", and tags as escaping values it can
// reach that are either & (address-taken) nodes or new(T),
// and tags pointer-typed or pointer-containing function
// parameters it can reach as leaking.
//
// If a value's address is taken but the address does not escape,
// then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals.

func escapes(all []*Node) {
	visitBottomUp(all, escAnalyze)
}

const (
	EscFuncUnknown = 0 + iota
	EscFuncPlanned
	EscFuncStarted
	EscFuncTagged
)
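
// For example (illustrative sketch, not part of the compiler): given
//
//	var sink *int
//	func f(p *int) { sink = p }
//
// escassign sees an assignment to a package-level variable and records
// the edge flow(theSink, p); escflood, walking from theSink, then reaches
// the parameter p at level 0 and marks it as leaking to the heap
// ("leaking param: p").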

// A Level encodes the reference state and context applied to
// (stack, heap) allocated memory.
//
// value is the overall sum of *(1) and &(-1) operations encountered
// along a path from a destination (sink, return value) to a source
// (allocation, parameter).
//
// suffixValue is the maximum-copy-started-suffix-level on
// a flow path from a sink/destination. That is, a value
// with suffixValue N is guaranteed to be dereferenced at least
// N deep (chained applications of DOTPTR or IND or INDEX)
// before the result is assigned to a sink.
//
// For example, suppose x is a pointer to T, declared type T struct { left, right *T }
//	sink = x.left.left --> level(x)=2, x is reached via two dereferences (DOTPTR) and does not escape to sink.
//	sink = &T{right:x} --> level(x)=-1, x is accessible from sink via one "address of"
//	sink = &T{right:&T{right:x}} --> level(x)=-2, x is accessible from sink via two "address of"
//
// However, in the next example x's level value and suffixValue differ:
//	sink = &T{right:&T{right:x.left}} --> level(x).value=-1, level(x).suffixValue=1
// The positive suffixValue indicates that x is NOT accessible
// from sink. Without a separate suffixValue to capture this, x would
// appear to escape because its "value" would be -1. (The copy
// operations are sometimes implicit in the source code; in this case,
// the value of x.left was copied into a field of a newly allocated T).
//
// Each node's level (value and suffixValue) is the maximum for
// all flow paths from (any) sink to that node.

// There's one of these for each Node, and the integer values
// rarely exceed even what can be stored in 4 bits, never mind 8.
type Level struct {
	value, suffixValue int8
}

// There are loops in the escape graph,
// causing arbitrary recursion into deeper and deeper
// levels. Cut this off safely by making minLevel sticky:
// once you get that deep, you cannot go down any further
// but you also cannot go up any further. This is a
// conservative fix. Making minLevel smaller (more negative)
// would handle more complex chains of indirections followed
// by address-of operations, at the cost of repeating the
// traversal once for each additional allowed level when a
// loop is encountered. Using -2 suffices to pass all the
// tests we have written so far, which we assume matches the
// level of complexity we want the escape analysis code to
// handle.
const MinLevel = -2

func (l Level) int() int {
	return int(l.value)
}

func levelFrom(i int) Level {
	if i <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: int8(i)}
}

func satInc8(x int8) int8 {
	if x == 127 {
		return 127
	}
	return x + 1
}

func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}

// inc returns the level l + 1, representing the effect of an indirect (*) operation.
func (l Level) inc() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: satInc8(l.value), suffixValue: satInc8(l.suffixValue)}
}
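
// escLevelExample is an illustrative sketch (hypothetical, not used by the
// compiler) showing how the Level operations model the examples in the
// Level comment above: each dereference on the path from a sink raises the
// level, each address-of lowers it, and a non-positive level means the
// source may be reachable from the sink.
func escLevelExample() {
	l := levelFrom(0) // start at the sink
	l = l.inc()       // sink = x.left: one dereference
	l = l.inc()       // sink = x.left.left: a second dereference
	_ = l.int()                   // 2: x itself is not reachable from the sink
	_ = l.guaranteedDereference() // 2: at least two dereferences before the sink

	l = levelFrom(0).dec() // sink = &x: one address-of
	_ = l.int()            // -1: the sink can reach x, so x leaks if the sink does
}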

// dec returns the level l - 1, representing the effect of an address-of (&) operation.
func (l Level) dec() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
}

// copy returns the level for a copy of a value with level l.
// The resulting suffixValue is at least zero, or larger if it was already larger.
func (l Level) copy() Level {
	return Level{value: l.value, suffixValue: max8(l.suffixValue, 0)}
}

func (l1 Level) min(l2 Level) Level {
	return Level{
		value:       min8(l1.value, l2.value),
		suffixValue: min8(l1.suffixValue, l2.suffixValue)}
}

// guaranteedDereference returns the number of dereferences
// applied to a pointer before addresses are taken/generated.
// This is the maximum level computed from path suffixes starting
// with copies where paths flow from destination to source.
func (l Level) guaranteedDereference() int {
	return int(l.suffixValue)
}

// An EscStep documents one step in the path from memory
// that is heap allocated to the (alleged) reason for the
// heap allocation.
type EscStep struct {
	src, dst *Node    // the endpoints of this edge in the escape-to-heap chain.
	where    *Node    // sometimes the endpoints don't match source locations; set 'where' to make that right
	parent   *EscStep // used in flood to record path
	why      string   // explanation for this step in the escape-to-heap chain
	busy     bool     // used in prevent to snip cycles.
}

type NodeEscState struct {
	Curfn             *Node
	Flowsrc           []EscStep // flow(this, src)
	Retval            Nodes     // on OCALLxxx, list of dummy return values
	Loopdepth         int32     // -1: global, 0: return variables, 1: function top level, increased inside function for every loop or label to mark scopes
	Level             Level
	Walkgen           uint32
	Maxextraloopdepth int32
}

func (e *EscState) nodeEscState(n *Node) *NodeEscState {
	if nE, ok := n.Opt().(*NodeEscState); ok {
		return nE
	}
	if n.Opt() != nil {
		Fatalf("nodeEscState: opt in use (%T)", n.Opt())
	}
	nE := &NodeEscState{
		Curfn: Curfn,
	}
	n.SetOpt(nE)
	e.opts = append(e.opts, n)
	return nE
}

func (e *EscState) track(n *Node) {
	if Curfn == nil {
		Fatalf("EscState.track: Curfn nil")
	}
	n.Esc = EscNone // until proven otherwise
	nE := e.nodeEscState(n)
	nE.Loopdepth = e.loopdepth
	e.noesc = append(e.noesc, n)
}

// Escape constants are numbered in order of increasing "escapiness"
// to help make inferences be monotonic. With the exception of
// EscNever which is sticky, eX < eY means that eY is more exposed
// than eX, and hence replaces it in a conservative analysis.
const (
	EscUnknown        = iota
	EscNone           // Does not escape to heap, result, or parameters.
	EscReturn         // Is returned or reachable from returned.
	EscHeap           // Reachable from the heap
	EscNever          // By construction will not escape.
	EscBits           = 3
	EscMask           = (1 << EscBits) - 1
	EscContentEscapes = 1 << EscBits // value obtained by indirect of parameter escapes to heap
	EscReturnBits     = EscBits + 1
	// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
)
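
// For example (illustrative): the value
//
//	EscReturn | EscContentEscapes | 1<<EscReturnBits // == 0x1a
//
// records that the content reached through a parameter escapes to the
// heap (contentEscapes) and that the parameter itself flows directly
// (level 0) to the first result; describeEscape below renders it as
// "EscReturn contentToHeap =".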

// escMax returns the maximum of an existing escape value
// (and its additional parameter flow flags) and a new escape type.
func escMax(e, etype uint16) uint16 {
	if e&EscMask >= EscHeap {
		// normalize
		if e&^EscMask != 0 {
			Fatalf("Escape information had unexpected return encoding bits (w/ EscHeap, EscNever), e&EscMask=%v", e&EscMask)
		}
	}
	if e&EscMask > etype {
		return e
	}
	if etype == EscNone || etype == EscReturn {
		return (e &^ EscMask) | etype
	}
	return etype
}

// For each input parameter to a function, the escapeReturnEncoding describes
// how the parameter may leak to the function's outputs. This is currently the
// "level" of the leak where level is 0 or larger (negative level means stored into
// something whose address is returned -- but that implies stored into the heap,
// hence EscHeap, which means that the details are not currently relevant).
const (
	bitsPerOutputInTag = 3                                 // For each output, the number of bits for a tag
	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
	maxEncodedLevel    = int(bitsMaskForTag - 1)           // The largest level that can be stored in a tag.
)

type EscState struct {
	// Fake node that all
	//   - return values and output variables
	//   - parameters on imported functions not marked 'safe'
	//   - assignments to global variables
	// flow to.
	theSink Node

	dsts      []*Node // all dst nodes
	loopdepth int32   // for detecting nested loop scopes
	pdepth    int     // for debug printing in recursions.
	dstcount  int     // diagnostic
	edgecount int     // diagnostic
	noesc     []*Node // list of possible non-escaping nodes, for printing
	recursive bool    // recursive function or group of mutually recursive functions.
	opts      []*Node // nodes with .Opt initialized
	walkgen   uint32
}

func newEscState(recursive bool) *EscState {
	e := new(EscState)
	e.theSink.Op = ONAME
	e.theSink.Orig = &e.theSink
	e.theSink.SetClass(PEXTERN)
	e.theSink.Sym = lookup(".sink")
	e.nodeEscState(&e.theSink).Loopdepth = -1
	e.recursive = recursive
	return e
}

func (e *EscState) stepWalk(dst, src *Node, why string, parent *EscStep) *EscStep {
	// TODO: keep a cache of these, mark entry/exit in escwalk to avoid allocation
	// Or perhaps never mind, since it is disabled unless printing is on.
	// We may want to revisit this, since the EscStep nodes would make
	// an excellent replacement for the poorly-separated graph-build/graph-flood
	// stages.
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, parent: parent}
}

func (e *EscState) stepAssign(step *EscStep, dst, src *Node, why string) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	if step != nil { // Caller may have known better.
		if step.why == "" {
			step.why = why
		}
		if step.dst == nil {
			step.dst = dst
		}
		if step.src == nil {
			step.src = src
		}
		return step
	}
	return &EscStep{src: src, dst: dst, why: why}
}

func (e *EscState) stepAssignWhere(dst, src *Node, why string, where *Node) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, where: where}
}

// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
	if fn == nil || fn.Func.Nname == nil {
		return nil
	}
	return fn.Func.Nname.Sym
}

// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
func (e *EscState) curfnSym(n *Node) *types.Sym {
	nE := e.nodeEscState(n)
	return funcSym(nE.Curfn)
}

func escAnalyze(all []*Node, recursive bool) {
	e := newEscState(recursive)

	for _, n := range all {
		if n.Op == ODCLFUNC {
			n.Esc = EscFuncPlanned
			if Debug['m'] > 3 {
				Dump("escAnalyze", n)
			}

		}
	}

	// flow-analyze functions
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.escfunc(n)
		}
	}

	// visit the upstream of each dst, mark address nodes with
	// addrescapes, mark parameters unsafe
	escapes := make([]uint16, len(e.dsts))
	for i, n := range e.dsts {
		escapes[i] = n.Esc
	}
	for _, n := range e.dsts {
		e.escflood(n)
	}
	for {
		done := true
		for i, n := range e.dsts {
			if n.Esc != escapes[i] {
				done = false
				if Debug['m'] > 2 {
					Warnl(n.Pos, "Reflooding %v %S", e.curfnSym(n), n)
				}
				escapes[i] = n.Esc
				e.escflood(n)
			}
		}
		if done {
			break
		}
	}

	// for all top level functions, tag the typenodes corresponding to the param nodes
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.esctag(n)
		}
	}

	if Debug['m'] != 0 {
		for _, n := range e.noesc {
			if n.Esc == EscNone {
				Warnl(n.Pos, "%v %S does not escape", e.curfnSym(n), n)
			}
		}
	}

	for _, x := range e.opts {
		x.SetOpt(nil)
	}
}

func (e *EscState) escfunc(fn *Node) {
	if fn.Esc != EscFuncPlanned {
		Fatalf("repeat escfunc %v", fn.Func.Nname)
	}
	fn.Esc = EscFuncStarted

	saveld := e.loopdepth
	e.loopdepth = 1
	savefn := Curfn
	Curfn = fn

	for _, ln := range Curfn.Func.Dcl {
		if ln.Op != ONAME {
			continue
		}
		lnE := e.nodeEscState(ln)
		switch ln.Class() {
		// out params are in a loopdepth between the sink and all local variables
		case PPARAMOUT:
			lnE.Loopdepth = 0

		case PPARAM:
			lnE.Loopdepth = 1
			if ln.Type != nil && !types.Haspointers(ln.Type) {
				break
			}
			if Curfn.Nbody.Len() == 0 && !Curfn.Noescape() {
				ln.Esc = EscHeap
			} else {
				ln.Esc = EscNone // prime for escflood later
			}
			e.noesc = append(e.noesc, ln)
		}
	}

	// in a mutually recursive group we lose track of the return values
	if e.recursive {
		for _, ln := range Curfn.Func.Dcl {
			if ln.Op == ONAME && ln.Class() == PPARAMOUT {
				e.escflows(&e.theSink, ln, e.stepAssign(nil, ln, ln, "returned from recursive function"))
			}
		}
	}

	e.escloopdepthlist(Curfn.Nbody)
	e.esclist(Curfn.Nbody, Curfn)
	Curfn = savefn
	e.loopdepth = saveld
}

// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
	looping    Node
	nonlooping Node
)

func (e *EscState) escloopdepthlist(l Nodes) {
	for _, n := range l.Slice() {
		e.escloopdepth(n)
	}
}

func (e *EscState) escloopdepth(n *Node) {
	if n == nil {
		return
	}

	e.escloopdepthlist(n.Ninit)

	switch n.Op {
	case OLABEL:
		if n.Left == nil || n.Left.Sym == nil {
			Fatalf("esc:label without label: %+v", n)
		}

		// Walk will complain about this label being already defined, but that's not until
		// after escape analysis. In the future, maybe pull label & goto analysis out of walk and put before esc.
		n.Left.Sym.Label = asTypesNode(&nonlooping)

	case OGOTO:
		if n.Left == nil || n.Left.Sym == nil {
			Fatalf("esc:goto without label: %+v", n)
		}

		// If we come past one that's uninitialized, this must be a (harmless) forward jump
		// but if it's set to nonlooping the label must have preceded this goto.
		if asNode(n.Left.Sym.Label) == &nonlooping {
			n.Left.Sym.Label = asTypesNode(&looping)
		}
	}

	e.escloopdepth(n.Left)
	e.escloopdepth(n.Right)
	e.escloopdepthlist(n.List)
	e.escloopdepthlist(n.Nbody)
	e.escloopdepthlist(n.Rlist)
}

func (e *EscState) esclist(l Nodes, parent *Node) {
	for _, n := range l.Slice() {
		e.esc(n, parent)
	}
}

func (e *EscState) isSliceSelfAssign(dst, src *Node) bool {
	// Detect the following special case.
	//
	//	func (b *Buffer) Foo() {
	//		n, m := ...
	//		b.buf = b.buf[n:m]
	//	}
	//
	// This assignment is a no-op for escape analysis,
	// it does not store any new pointers into b that were not already there.
	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
	// Here we assume that the statement will not contain calls,
	// that is, that order will move any calls to init.
	// Otherwise base ONAME value could change between the moments
	// when we evaluate it for dst and for src.

	// dst is ONAME dereference.
	if dst.Op != OIND && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
		return false
	}
	// src is a slice operation.
	switch src.Op {
	case OSLICE, OSLICE3, OSLICESTR:
		// OK.
	case OSLICEARR, OSLICE3ARR:
		// Since arrays are embedded into containing object,
		// slice of non-pointer array will introduce a new pointer into b that was not already there
		// (pointer to b itself). After such assignment, if b contents escape,
		// b escapes as well. If we ignore such OSLICEARR, we will conclude
		// that b does not escape when b contents do.
		//
		// Pointer to an array is OK since it's not stored inside b directly.
		// For slicing an array (not pointer to array), there is an implicit OADDR.
		// We check that to determine non-pointer array slicing.
		if src.Left.Op == OADDR {
			return false
		}
	default:
		return false
	}
	// slice is applied to ONAME dereference.
	if src.Left.Op != OIND && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
		return false
	}
	// dst and src reference the same base ONAME.
	return dst.Left == src.Left.Left
}

// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func (e *EscState) isSelfAssign(dst, src *Node) bool {
	if e.isSliceSelfAssign(dst, src) {
		return true
	}

	// Detect trivial assignments that assign back to the same object.
	//
	// It covers these cases:
	//	val.x = val.y
	//	val.x[i] = val.y[j]
	//	val.x1.x2 = val.x1.y2
	//	... etc
	//
	// These assignments do not change assigned object lifetime.

	if dst == nil || src == nil || dst.Op != src.Op {
		return false
	}

	switch dst.Op {
	case ODOT, ODOTPTR:
		// Safe trailing accessors that are permitted to differ.
	case OINDEX:
		if e.mayAffectMemory(dst.Right) || e.mayAffectMemory(src.Right) {
			return false
		}
	default:
		return false
	}

	// The expression prefix must be both "safe" and identical.
	return samesafeexpr(dst.Left, src.Left)
}

// mayAffectMemory reports whether n evaluation may affect program memory state.
// If expression can't affect it, then it can be safely ignored by the escape analysis.
func (e *EscState) mayAffectMemory(n *Node) bool {
	// We may want to use a "memory safe" blacklist instead of the general
	// "side-effect free" notion; such a list would include all calls and
	// other ops that can allocate or change global state.
	// It's safer to start from a whitelist for now.
	//
	// We're ignoring things like division by zero, index out of range,
	// and nil pointer dereference here.
	switch n.Op {
	case ONAME, OCLOSUREVAR, OLITERAL:
		return false

	// Left+Right group.
	case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
		return e.mayAffectMemory(n.Left) || e.mayAffectMemory(n.Right)

	// Left group.
	case ODOT, ODOTPTR, OIND, OCONVNOP, OCONV, OLEN, OCAP,
		ONOT, OCOM, OPLUS, OMINUS, OALIGNOF, OOFFSETOF, OSIZEOF:
		return e.mayAffectMemory(n.Left)

	default:
		return true
	}
}
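
// For example (illustrative): isSelfAssign reports true for
//
//	b.x.f = b.x.g
//	b.buf[i] = b.buf[j]
//
// because the prefixes (b.x, b.buf) are identical safe expressions, but
// false for b.buf[f()] = b.buf[j], since the call f() may affect memory
// and the assignment can no longer be treated as a pure self-assignment.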

func (e *EscState) esc(n *Node, parent *Node) {
	if n == nil {
		return
	}

	lno := setlineno(n)

	// ninit logically runs at a different loopdepth than the rest of the for loop.
	e.esclist(n.Ninit, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth++
	}

	// type switch variables have no ODCL.
	// process type switch as declaration.
	// must happen before processing of switch body,
	// so before recursion.
	if n.Op == OSWITCH && n.Left != nil && n.Left.Op == OTYPESW {
		for _, cas := range n.List.Slice() { // cases
			// it.N().Rlist is the variable per case
			if cas.Rlist.Len() != 0 {
				e.nodeEscState(cas.Rlist.First()).Loopdepth = e.loopdepth
			}
		}
	}

	// Big stuff and non-constant-sized stuff escapes unconditionally.
	// "Big" conditions that were scattered around in walk have been
	// gathered here.
	if n.Esc != EscHeap && n.Type != nil &&
		(n.Type.Width > maxStackVarSize ||
			(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize ||
			n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
		// isSmallMakeSlice returns false for non-constant len/cap.
		// If that's the case, print a more accurate escape reason.
		var msgVerb, escapeMsg string
		if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || !Isconst(n.Right, CTINT)) {
			msgVerb, escapeMsg = "has ", "non-constant size"
		} else {
			msgVerb, escapeMsg = "is ", "too large for stack"
		}

		if Debug['m'] > 2 {
			Warnl(n.Pos, "%v "+msgVerb+escapeMsg, n)
		}
		n.Esc = EscHeap
		addrescapes(n)
		e.escassignSinkWhy(n, n, escapeMsg) // TODO category: tooLarge
	}

	e.esc(n.Left, n)

	if n.Op == ORANGE {
		// ORANGE node's Right is evaluated before the loop
		e.loopdepth--
	}

	e.esc(n.Right, n)

	if n.Op == ORANGE {
		e.loopdepth++
	}

	e.esclist(n.Nbody, n)
	e.esclist(n.List, n)
	e.esclist(n.Rlist, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth--
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v esc: %v\n", linestr(lineno), e.loopdepth, funcSym(Curfn), n)
	}

opSwitch:
	switch n.Op {
	// Record loop depth at declaration.
	case ODCL:
		if n.Left != nil {
			e.nodeEscState(n.Left).Loopdepth = e.loopdepth
		}

	case OLABEL:
		if asNode(n.Left.Sym.Label) == &nonlooping {
			if Debug['m'] > 2 {
				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
			}
		} else if asNode(n.Left.Sym.Label) == &looping {
			if Debug['m'] > 2 {
				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
			}
			e.loopdepth++
		}

		n.Left.Sym.Label = nil

	case ORANGE:
		if n.List.Len() >= 2 {
			// Everything but fixed array is a dereference.

			// If fixed array is really the address of fixed array,
			// it is also a dereference, because it is implicitly
			// dereferenced (see #12588)
			if n.Type.IsArray() &&
				!(n.Right.Type.IsPtr() && types.Identical(n.Right.Type.Elem(), n.Type)) {
				e.escassignWhyWhere(n.List.Second(), n.Right, "range", n)
			} else {
				e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n))
			}
		}

	case OSWITCH:
		if n.Left != nil && n.Left.Op == OTYPESW {
			for _, cas := range n.List.Slice() {
				// cases
				// n.Left.Right is the argument of the .(type),
				// it.N().Rlist is the variable per case
				if cas.Rlist.Len() != 0 {
					e.escassignWhyWhere(cas.Rlist.First(), n.Left.Right, "switch case", n)
				}
			}
		}

	case OAS, OASOP:
		// Filter out some no-op assignments for escape analysis.
		if e.isSelfAssign(n.Left, n.Right) {
			if Debug['m'] != 0 {
				Warnl(n.Pos, "%v ignoring self-assignment in %S", e.curfnSym(n), n)
			}
			break
		}

		e.escassign(n.Left, n.Right, e.stepAssignWhere(nil, nil, "", n))

	case OAS2: // x,y = a,b
		if n.List.Len() == n.Rlist.Len() {
			rs := n.Rlist.Slice()
			where := n
			for i, n := range n.List.Slice() {
				e.escassignWhyWhere(n, rs[i], "assign-pair", where)
			}
		}

	case OAS2RECV: // v, ok = <-ch
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-receive", n)
	case OAS2MAPR: // v, ok = m[k]
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-mapr", n)
	case OAS2DOTTYPE: // v, ok = x.(type)
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-dot-type", n)

	case OSEND: // ch <- x
		e.escassignSinkWhy(n, n.Right, "send")

	case ODEFER:
		if e.loopdepth == 1 { // top level
			break
		}
		// arguments leak out of scope
		// TODO: leak to a dummy node instead
		// defer f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "defer func")
		e.escassignSinkWhy(n, n.Left.Right, "defer func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "defer func arg")
		}

	case OPROC:
		// go f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "go func")
		e.escassignSinkWhy(n, n.Left.Right, "go func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "go func arg")
		}

	case OCALLMETH, OCALLFUNC, OCALLINTER:
		e.esccall(n, parent)

	// esccall already done on n.Rlist.First()
	// tie its Retval to n.List
	case OAS2FUNC: // x,y = f()
		rs := e.nodeEscState(n.Rlist.First()).Retval.Slice()
		where := n
		for i, n := range n.List.Slice() {
			if i >= len(rs) {
				break
			}
			e.escassignWhyWhere(n, rs[i], "assign-pair-func-call", where)
		}
		if n.List.Len() != len(rs) {
			Fatalf("esc oas2func")
		}

	case ORETURN:
		retList := n.List
		if retList.Len() == 1 && Curfn.Type.NumResults() > 1 {
			// OAS2FUNC in disguise
			// esccall already done on n.List.First()
			// tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's
			retList = e.nodeEscState(n.List.First()).Retval
		}

		i := 0
		for _, lrn := range Curfn.Func.Dcl {
			if i >= retList.Len() {
				break
			}
			if lrn.Op != ONAME || lrn.Class() != PPARAMOUT {
				continue
			}
			e.escassignWhyWhere(lrn, retList.Index(i), "return", n)
			i++
		}

		if i < retList.Len() {
			Fatalf("esc return list")
		}
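
		// For example (illustrative): in
		//
		//	func f() (int, *T) { return g() }
		//
		// the lone call result list stands in for both return values: the
		// Retval nodes recorded for g() above are tied to f's PPARAMOUT
		// variables, as if the statement had been r1, r2 := g(); return r1, r2.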
%S", e.curfnSym(n), n) 1003 } 1004 } 1005 e.escassignDereference(&e.theSink, n.List.First(), e.stepAssignWhere(n, n.List.First(), "appendee slice", n)) // The original elements are now leaked, too 1006 1007 case OCOPY: 1008 e.escassignDereference(&e.theSink, n.Right, e.stepAssignWhere(n, n.Right, "copied slice", n)) // lose track of assign of dereference 1009 1010 case OCONV, OCONVNOP: 1011 e.escassignWhyWhere(n, n.Left, "converted", n) 1012 1013 case OCONVIFACE: 1014 e.track(n) 1015 e.escassignWhyWhere(n, n.Left, "interface-converted", n) 1016 1017 case OARRAYLIT: 1018 // Link values to array 1019 for _, elt := range n.List.Slice() { 1020 if elt.Op == OKEY { 1021 elt = elt.Right 1022 } 1023 e.escassign(n, elt, e.stepAssignWhere(n, elt, "array literal element", n)) 1024 } 1025 1026 case OSLICELIT: 1027 // Slice is not leaked until proven otherwise 1028 e.track(n) 1029 // Link values to slice 1030 for _, elt := range n.List.Slice() { 1031 if elt.Op == OKEY { 1032 elt = elt.Right 1033 } 1034 e.escassign(n, elt, e.stepAssignWhere(n, elt, "slice literal element", n)) 1035 } 1036 1037 // Link values to struct. 1038 case OSTRUCTLIT: 1039 for _, elt := range n.List.Slice() { 1040 e.escassignWhyWhere(n, elt.Left, "struct literal element", n) 1041 } 1042 1043 case OPTRLIT: 1044 e.track(n) 1045 1046 // Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too. 1047 e.escassignWhyWhere(n, n.Left, "pointer literal [assign]", n) 1048 1049 case OCALLPART: 1050 e.track(n) 1051 1052 // Contents make it to memory, lose track. 1053 e.escassignSinkWhy(n, n.Left, "call part") 1054 1055 case OMAPLIT: 1056 e.track(n) 1057 // Keys and values make it to memory, lose track. 1058 for _, elt := range n.List.Slice() { 1059 e.escassignSinkWhy(n, elt.Left, "map literal key") 1060 e.escassignSinkWhy(n, elt.Right, "map literal value") 1061 } 1062 1063 case OCLOSURE: 1064 // Link addresses of captured variables to closure. 1065 for _, v := range n.Func.Closure.Func.Cvars.Slice() { 1066 if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs 1067 continue 1068 } 1069 a := v.Name.Defn 1070 if !v.Name.Byval() { 1071 a = nod(OADDR, a, nil) 1072 a.Pos = v.Pos 1073 e.nodeEscState(a).Loopdepth = e.loopdepth 1074 a = typecheck(a, Erv) 1075 } 1076 1077 e.escassignWhyWhere(n, a, "captured by a closure", n) 1078 } 1079 fallthrough 1080 1081 case OMAKECHAN, 1082 OMAKEMAP, 1083 OMAKESLICE, 1084 ONEW, 1085 OARRAYRUNESTR, 1086 OARRAYBYTESTR, 1087 OSTRARRAYRUNE, 1088 OSTRARRAYBYTE, 1089 ORUNESTR: 1090 e.track(n) 1091 1092 case OADDSTR: 1093 e.track(n) 1094 // Arguments of OADDSTR do not escape. 1095 1096 case OADDR: 1097 // current loop depth is an upper bound on actual loop depth 1098 // of addressed value. 1099 e.track(n) 1100 1101 // for &x, use loop depth of x if known. 1102 // it should always be known, but if not, be conservative 1103 // and keep the current loop depth. 1104 if n.Left.Op == ONAME { 1105 switch n.Left.Class() { 1106 // PPARAM is loop depth 1 always. 1107 // PPARAMOUT is loop depth 0 for writes 1108 // but considered loop depth 1 for address-of, 1109 // so that writing the address of one result 1110 // to another (or the same) result makes the 1111 // first result move to the heap. 
			case PPARAM, PPARAMOUT:
				nE := e.nodeEscState(n)
				nE.Loopdepth = 1
				break opSwitch
			}
		}
		nE := e.nodeEscState(n)
		leftE := e.nodeEscState(n.Left)
		if leftE.Loopdepth != 0 {
			nE.Loopdepth = leftE.Loopdepth
		}

	case ODOT,
		ODOTPTR,
		OINDEX:
		// Propagate the loopdepth of t to t.field.
		if n.Left.Op != OLITERAL { // OLITERAL node doesn't have esc state
			e.nodeEscState(n).Loopdepth = e.nodeEscState(n.Left).Loopdepth
		}
	}

	lineno = lno
}

// escassignWhyWhere bundles a common case of
// escassign(e, dst, src, e.stepAssignWhere(dst, src, reason, where))
func (e *EscState) escassignWhyWhere(dst, src *Node, reason string, where *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, where)
	}
	e.escassign(dst, src, step)
}

// escassignSinkWhy bundles a common case of
// escassign(e, &e.theSink, src, e.stepAssign(nil, dst, src, reason))
func (e *EscState) escassignSinkWhy(dst, src *Node, reason string) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssign(nil, dst, src, reason)
	}
	e.escassign(&e.theSink, src, step)
}

// escassignSinkWhyWhere is escassignSinkWhy but includes a call site
// for accurate location reporting.
func (e *EscState) escassignSinkWhyWhere(dst, src *Node, reason string, call *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, call)
	}
	e.escassign(&e.theSink, src, step)
}

// Assert that expr somehow gets assigned to dst, if non-nil. For
// dst==nil, any name node expr still must be marked as being
// evaluated in curfn. For expr==nil, dst must still be examined for
// evaluations inside it (e.g. *f(x) = y).
func (e *EscState) escassign(dst, src *Node, step *EscStep) {
	if dst.isBlank() || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
		return
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v escassign: %S(%0j)[%v] = %S(%0j)[%v]\n",
			linestr(lineno), e.loopdepth, funcSym(Curfn),
			dst, dst, dst.Op,
			src, src, src.Op)
	}

	setlineno(dst)

	originalDst := dst
	dstwhy := "assigned"

	// Analyze lhs of assignment.
	// Replace dst with &e.theSink if we can't track it.
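	// For example (illustrative):
	//
	//	x = v     (ONAME, local)      -> dst stays x
	//	glob = v  (ONAME, PEXTERN)    -> dst becomes theSink
	//	x.f = v   (ODOT)              -> treated as x = v
	//	*p = v    (OIND)              -> dst becomes theSink ("star-equals")
	//	a[i] = v  (OINDEX on array)   -> treated as a = v
	//	s[i] = v  (OINDEX on slice)   -> dst becomes theSink
	//	m[k] = v  (OINDEXMAP)         -> k flows to theSink; dst becomes theSink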
	switch dst.Op {
	default:
		Dump("dst", dst)
		Fatalf("escassign: unexpected dst")

	case OARRAYLIT,
		OSLICELIT,
		OCLOSURE,
		OCONV,
		OCONVIFACE,
		OCONVNOP,
		OMAPLIT,
		OSTRUCTLIT,
		OPTRLIT,
		ODDDARG,
		OCALLPART:

	case ONAME:
		if dst.Class() == PEXTERN {
			dstwhy = "assigned to top level variable"
			dst = &e.theSink
		}

	case ODOT: // treat "dst.x = src" as "dst = src"
		e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "dot-equals"))
		return

	case OINDEX:
		if dst.Left.Type.IsArray() {
			e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "array-element-equals"))
			return
		}

		dstwhy = "slice-element-equals"
		dst = &e.theSink // lose track of dereference

	case OIND:
		dstwhy = "star-equals"
		dst = &e.theSink // lose track of dereference

	case ODOTPTR:
		dstwhy = "star-dot-equals"
		dst = &e.theSink // lose track of dereference

	// lose track of key and value
	case OINDEXMAP:
		e.escassign(&e.theSink, dst.Right, e.stepAssign(nil, originalDst, src, "key of map put"))
		dstwhy = "value of map put"
		dst = &e.theSink
	}

	lno := setlineno(src)
	e.pdepth++

	switch src.Op {
	case OADDR, // dst = &x
		OIND,    // dst = *x
		ODOTPTR, // dst = (*x).f
		ONAME,
		ODDDARG,
		OPTRLIT,
		OARRAYLIT,
		OSLICELIT,
		OMAPLIT,
		OSTRUCTLIT,
		OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		OARRAYRUNESTR,
		OARRAYBYTESTR,
		OSTRARRAYRUNE,
		OSTRARRAYBYTE,
		OADDSTR,
		ONEW,
		OCALLPART,
		ORUNESTR,
		OCONVIFACE:
		e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))

	case OCLOSURE:
		// OCLOSURE is lowered to OPTRLIT,
		// insert OADDR to account for the additional indirection.
		a := nod(OADDR, src, nil)
		a.Pos = src.Pos
		e.nodeEscState(a).Loopdepth = e.nodeEscState(src).Loopdepth
		a.Type = types.NewPtr(src.Type)
		e.escflows(dst, a, e.stepAssign(nil, originalDst, src, dstwhy))

	// Flowing multiple returns to a single dst happens when
	// analyzing "go f(g())": here g() flows to sink (issue 4529).
	case OCALLMETH, OCALLFUNC, OCALLINTER:
		for _, n := range e.nodeEscState(src).Retval.Slice() {
			e.escflows(dst, n, e.stepAssign(nil, originalDst, n, dstwhy))
		}

	// A non-pointer escaping from a struct does not concern us.
	case ODOT:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		fallthrough

	// Conversions, field access, slice all preserve the input value.
	case OCONV,
		OCONVNOP,
		ODOTMETH,
		// treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
		OSLICE,
		OSLICE3,
		OSLICEARR,
		OSLICE3ARR,
		OSLICESTR:
		// Conversions, field access, slice all preserve the input value.
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case ODOTTYPE,
		ODOTTYPE2:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case OAPPEND:
		// Append returns first argument.
		// Subsequent arguments are already leaked because they are operands to append.
		e.escassign(dst, src.List.First(), e.stepAssign(step, dst, src.List.First(), dstwhy))

	case OINDEX:
		// Index of array preserves input value.
		if src.Left.Type.IsArray() {
			e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
		} else {
			e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))
		}

	// Might be pointer arithmetic, in which case
	// the operands flow into the result.
	// TODO(rsc): Decide what the story is here. This is unsettling.
	case OADD,
		OSUB,
		OOR,
		OXOR,
		OMUL,
		ODIV,
		OMOD,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		OPLUS,
		OMINUS,
		OCOM:
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

		e.escassign(dst, src.Right, e.stepAssign(step, originalDst, src, dstwhy))
	}

	e.pdepth--
	lineno = lno
}

// Common case for escapes is 16 bits 000000000xxxEEEE
// where commonest cases for xxx encoding in-to-out pointer
// flow are 000, 001, 010, 011 and EEEE is computed Esc bits.
// Note width of xxx depends on value of constant
// bitsPerOutputInTag -- expect 2 or 3, so in practice the
// tag cache array is 64 or 128 long. Some entries will
// never be populated.
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string

// mktag returns the string representation for an escape analysis tag.
func mktag(mask int) string {
	switch mask & EscMask {
	case EscNone, EscReturn:
	default:
		Fatalf("escape mktag")
	}

	if mask < len(tags) && tags[mask] != "" {
		return tags[mask]
	}

	s := fmt.Sprintf("esc:0x%x", mask)
	if mask < len(tags) {
		tags[mask] = s
	}
	return s
}

// parsetag decodes an escape analysis tag and returns the esc value.
func parsetag(note string) uint16 {
	if !strings.HasPrefix(note, "esc:") {
		return EscUnknown
	}
	n, _ := strconv.ParseInt(note[4:], 0, 0)
	em := uint16(n)
	if em == 0 {
		return EscNone
	}
	return em
}
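
// For example (illustrative): mktag(EscNone|EscContentEscapes) returns
// "esc:0x9"; parsetag("esc:0x9") yields 0x9 again, and describeEscape(0x9)
// below prints "EscNone contentToHeap": nothing flows to the results, but
// what the parameter points at escapes to the heap.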

// describeEscape returns a string describing the escape tag.
// The result is either one of {EscUnknown, EscNone, EscHeap} which all have no further annotation
// or a description of parameter flow, which takes the form of an optional "contentToHeap"
// indicating that the content of this parameter is leaked to the heap, followed by a sequence
// of level encodings separated by spaces, one for each parameter, where _ means no flow,
// = means direct flow, and N asterisks (*) encodes content (obtained by indirection) flow.
// e.g., "contentToHeap _ =" means that a parameter's content (one or more dereferences)
// escapes to the heap, the parameter does not leak to the first output, but does leak directly
// to the second output (and if there are more than two outputs, there is no flow to those.)
func describeEscape(em uint16) string {
	var s string
	switch em & EscMask {
	case EscUnknown:
		s = "EscUnknown"
	case EscNone:
		s = "EscNone"
	case EscHeap:
		s = "EscHeap"
	case EscReturn:
		s = "EscReturn"
	}
	if em&EscContentEscapes != 0 {
		if s != "" {
			s += " "
		}
		s += "contentToHeap"
	}
	for em >>= EscReturnBits; em != 0; em >>= bitsPerOutputInTag {
		// See encoding description above
		if s != "" {
			s += " "
		}
		switch embits := em & bitsMaskForTag; embits {
		case 0:
			s += "_"
		case 1:
			s += "="
		default:
			for i := uint16(0); i < embits-1; i++ {
				s += "*"
			}
		}

	}
	return s
}

// escassignfromtag models the input-to-output assignment flow of one of a function
// call's arguments, where the flow is encoded in "note".
func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) uint16 {
	em := parsetag(note)
	if src.Op == OLITERAL {
		return em
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::assignfromtag:: src=%S, em=%s\n",
			linestr(lineno), src, describeEscape(em))
	}

	if em == EscUnknown {
		e.escassignSinkWhyWhere(src, src, "passed to call[argument escapes]", call)
		return em
	}

	if em == EscNone {
		return em
	}

	// If content inside parameter (reached via indirection)
	// escapes to heap, mark as such.
	if em&EscContentEscapes != 0 {
		e.escassign(&e.theSink, e.addDereference(src), e.stepAssignWhere(src, src, "passed to call[argument content escapes]", call))
	}

	em0 := em
	dstsi := 0
	for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em >>= bitsPerOutputInTag {
		// Prefer the lowest-level path to the reference (for escape purposes).
		// Two-bit encoding (for example; 1, 3, and 4 bits are other options)
		//  01 = 0-level
		//  10 = 1-level, (content escapes),
		//  11 = 2-level, (content of content escapes),
		embits := em & bitsMaskForTag
		if embits > 0 {
			n := src
			for i := uint16(0); i < embits-1; i++ {
				n = e.addDereference(n) // encode level>0 as indirections
			}
			e.escassign(dsts.Index(dstsi), n, e.stepAssignWhere(dsts.Index(dstsi), src, "passed-to-and-returned-from-call", call))
		}
		dstsi++
	}
	// If there are too many outputs to fit in the tag,
	// that is handled at the encoding end as EscHeap,
	// so there is no need to check here.

	if em != 0 && dstsi >= dsts.Len() {
		Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
	}
	return em0
}

func (e *EscState) escassignDereference(dst *Node, src *Node, step *EscStep) {
	if src.Op == OLITERAL {
		return
	}
	e.escassign(dst, e.addDereference(src), step)
}
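
// For example (illustrative): for a call r := f(p) where f's parameter
// carries the tag "esc:0x12" (EscReturn plus a level-0 flow to the first
// result), escassignfromtag records flow(r, p). With the tag "esc:0x22"
// (a level-1 flow) it records flow(r, *p) via addDereference, so only
// what p points at, not p itself, is tied to r.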

// addDereference constructs a suitable OIND note applied to src.
// Because this is for purposes of escape accounting, not execution,
// some semantically dubious node combinations are (currently) possible.
func (e *EscState) addDereference(n *Node) *Node {
	ind := nod(OIND, n, nil)
	e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
	ind.Pos = n.Pos
	t := n.Type
	if t.IsPtr() || t.IsSlice() {
		// This should model our own sloppy use of OIND to encode
		// decreasing levels of indirection; i.e., "indirecting" a slice
		// yields the type of an element.
		t = t.Elem()
	} else if t.IsString() {
		t = types.Types[TUINT8]
	}
	ind.Type = t
	return ind
}

// escNoteOutputParamFlow encodes maxEncodedLevel/.../1/0-level flow to the vargen'th parameter.
// Levels greater than maxEncodedLevel are replaced with maxEncodedLevel.
// If the encoding cannot describe the modified input level and output number, then EscHeap is returned.
func escNoteOutputParamFlow(e uint16, vargen int32, level Level) uint16 {
	// Flow+level is encoded in two bits.
	// 00 = not flow, xx = level+1 for 0 <= level <= maxEncodedLevel
	// 16 bits for Esc allows 6x2bits or 4x3bits or 3x4bits if additional information would be useful.
	if level.int() <= 0 && level.guaranteedDereference() > 0 {
		return escMax(e|EscContentEscapes, EscNone) // At least one deref, thus only content.
	}
	if level.int() < 0 {
		return EscHeap
	}
	if level.int() > maxEncodedLevel {
		// Cannot encode larger values than maxEncodedLevel.
		level = levelFrom(maxEncodedLevel)
	}
	encoded := uint16(level.int() + 1)

	shift := uint(bitsPerOutputInTag*(vargen-1) + EscReturnBits)
	old := (e >> shift) & bitsMaskForTag
	if old == 0 || encoded != 0 && encoded < old {
		old = encoded
	}

	encodedFlow := old << shift
	if (encodedFlow>>shift)&bitsMaskForTag != old {
		// Encoding failure defaults to heap.
		return EscHeap
	}

	return (e &^ (bitsMaskForTag << shift)) | encodedFlow
}
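
// For example (illustrative): a parameter that is returned unchanged is a
// level-0 flow to that result, so escNoteOutputParamFlow(EscReturn, 1,
// levelFrom(0)) stores level+1 == 1 in the first 3-bit output slot and
// yields 0x12; mktag renders that as "esc:0x12" and describeEscape prints
// "EscReturn =". If the overall level is non-positive but at least one
// dereference is guaranteed, the flow collapses to EscContentEscapes:
// only the parameter's content leaks.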

func (e *EscState) initEscRetval(call *Node, fntype *types.Type) {
	cE := e.nodeEscState(call)
	cE.Retval.Set(nil) // Suspect this is not nil for indirect calls.
	for i, f := range fntype.Results().Fields().Slice() {
		buf := fmt.Sprintf(".out%d", i)
		ret := newname(lookup(buf))
		ret.SetAddable(false) // TODO(mdempsky): Seems suspicious.
		ret.Type = f.Type
		ret.SetClass(PAUTO)
		ret.Name.Curfn = Curfn
		e.nodeEscState(ret).Loopdepth = e.loopdepth
		ret.Name.SetUsed(true)
		ret.Pos = call.Pos
		cE.Retval.Append(ret)
	}
}

// This is a bit messier than fortunate, pulled out of esc's big
// switch for clarity. We either have the paramnodes, which may be
// connected to other things through flows or we have the parameter type
// nodes, which may be marked "noescape". Navigating the ast is slightly
// different for methods vs plain functions and for imported vs
// this-package
func (e *EscState) esccall(call *Node, parent *Node) {
	var fntype *types.Type
	var indirect bool
	var fn *Node
	switch call.Op {
	default:
		Fatalf("esccall")

	case OCALLFUNC:
		fn = call.Left
		fntype = fn.Type
		indirect = fn.Op != ONAME || fn.Class() != PFUNC

	case OCALLMETH:
		fn = asNode(call.Left.Sym.Def)
		if fn != nil {
			fntype = fn.Type
		} else {
			fntype = call.Left.Type
		}

	case OCALLINTER:
		fntype = call.Left.Type
		indirect = true
	}

	argList := call.List
	if argList.Len() == 1 {
		arg := argList.First()
		if arg.Type.IsFuncArgStruct() { // f(g())
			argList = e.nodeEscState(arg).Retval
		}
	}

	args := argList.Slice()

	if indirect {
		// We know nothing!
		// Leak all the parameters
		for _, arg := range args {
			e.escassignSinkWhy(call, arg, "parameter to indirect call")
			if Debug['m'] > 3 {
				fmt.Printf("%v::esccall:: indirect call <- %S, untracked\n", linestr(lineno), arg)
			}
		}
		// Set up bogus outputs
		e.initEscRetval(call, fntype)
		// If there is a receiver, it also leaks to heap.
		if call.Op != OCALLFUNC {
			rf := fntype.Recv()
			r := call.Left.Left
			if types.Haspointers(rf.Type) {
				e.escassignSinkWhy(call, r, "receiver in indirect call")
			}
		} else { // indirect and OCALLFUNC = could be captured variables, too. (#14409)
			rets := e.nodeEscState(call).Retval.Slice()
			for _, ret := range rets {
				e.escassignDereference(ret, fn, e.stepAssignWhere(ret, fn, "captured by called closure", call))
			}
		}
		return
	}

	cE := e.nodeEscState(call)
	if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC &&
		fn.Name.Defn != nil && fn.Name.Defn.Nbody.Len() != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged {
		// function in same mutually recursive group. Incorporate into flow graph.
		if Debug['m'] > 3 {
			fmt.Printf("%v::esccall:: %S in recursive group\n", linestr(lineno), call)
		}

		if fn.Name.Defn.Esc == EscFuncUnknown || cE.Retval.Len() != 0 {
			Fatalf("graph inconsistency")
		}

		sawRcvr := false
		for _, n := range fn.Name.Defn.Func.Dcl {
			switch n.Class() {
			case PPARAM:
				if call.Op != OCALLFUNC && !sawRcvr {
					e.escassignWhyWhere(n, call.Left.Left, "call receiver", call)
					sawRcvr = true
					continue
				}
				if len(args) == 0 {
					continue
				}
				arg := args[0]
				if n.Isddd() && !call.Isddd() {
					// Introduce ODDDARG node to represent ... allocation.
					arg = nod(ODDDARG, nil, nil)
					arr := types.NewArray(n.Type.Elem(), int64(len(args)))
					arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
					arg.Pos = call.Pos
					e.track(arg)
					call.Right = arg
				}
				e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help.
				if arg == args[0] {
					args = args[1:]
					continue
				}
				// "..." arguments are untracked
				for _, a := range args {
					if Debug['m'] > 3 {
						fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
					}
					e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
				}
				// No more PPARAM processing, but keep
				// going for PPARAMOUT.
				args = nil

			case PPARAMOUT:
				cE.Retval.Append(n)
			}
		}

		return
	}

	// Imported or completely analyzed function. Use the escape tags.
	if cE.Retval.Len() != 0 {
		Fatalf("esc already decorated call %+v\n", call)
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::esccall:: %S not recursive\n", linestr(lineno), call)
	}

	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
	e.initEscRetval(call, fntype)

	// Receiver.
	if call.Op != OCALLFUNC {
		rf := fntype.Recv()
		r := call.Left.Left
		if types.Haspointers(rf.Type) {
			e.escassignfromtag(rf.Note, cE.Retval, r, call)
		}
	}

	for i, param := range fntype.Params().FieldSlice() {
		note := param.Note
		var arg *Node
		if param.Isddd() && !call.Isddd() {
			rest := args[i:]
			if len(rest) == 0 {
				break
			}

			// Introduce ODDDARG node to represent ... allocation.
			arg = nod(ODDDARG, nil, nil)
			arg.Pos = call.Pos
			arr := types.NewArray(param.Type.Elem(), int64(len(rest)))
			arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
			e.track(arg)
			call.Right = arg

			// Store arguments into slice for ... arg.
			for _, a := range rest {
				if Debug['m'] > 3 {
					fmt.Printf("%v::esccall:: ... <- %S\n", linestr(lineno), a)
				}
				if note == uintptrEscapesTag {
					e.escassignSinkWhyWhere(arg, a, "arg to uintptrescapes ...", call)
				} else {
					e.escassignWhyWhere(arg, a, "arg to ...", call)
				}
			}
		} else {
			arg = args[i]
			if note == uintptrEscapesTag {
				e.escassignSinkWhy(arg, arg, "escaping uintptr")
			}
		}

		if types.Haspointers(param.Type) && e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OPROC {
			a := arg
			for a.Op == OCONVNOP {
				a = a.Left
			}
			switch a.Op {
			// The callee has already been analyzed, so its arguments have esc tags.
			// The argument is marked as not escaping at all.
			// Record that fact so that any temporary used for
			// synthesizing this expression can be reclaimed when
			// the function returns.
			// This 'noescape' is even stronger than the usual esc == EscNone.
			// arg.Esc == EscNone means that arg does not escape the current function.
			// arg.SetNoescape(true) here means that arg does not escape this statement
			// in the current function.
			case OCALLPART, OCLOSURE, ODDDARG, OARRAYLIT, OSLICELIT, OPTRLIT, OSTRUCTLIT:
				a.SetNoescape(true)
			}
		}
	}
}

// escflows records the link src->dst in dst, throwing out some quick wins,
// and also ensuring that dst is noted as a flow destination.
func (e *EscState) escflows(dst, src *Node, why *EscStep) {
	if dst == nil || src == nil || dst == src {
		return
	}

	// Don't bother building a graph for scalars.
	if src.Type != nil && !types.Haspointers(src.Type) && !isReflectHeaderDataField(src) {
		if Debug['m'] > 3 {
			fmt.Printf("%v::NOT flows:: %S <- %S\n", linestr(lineno), dst, src)
		}
		return
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::flows:: %S <- %S\n", linestr(lineno), dst, src)
	}

	dstE := e.nodeEscState(dst)
	if len(dstE.Flowsrc) == 0 {
		e.dsts = append(e.dsts, dst)
		e.dstcount++
	}

	e.edgecount++

	if why == nil {
		dstE.Flowsrc = append(dstE.Flowsrc, EscStep{src: src})
	} else {
		starwhy := *why
		starwhy.src = src // TODO: need to reconcile this w/ needs of explanations.
		dstE.Flowsrc = append(dstE.Flowsrc, starwhy)
	}
}

// Whenever we hit a reference node, the level goes up by one, and whenever
// we hit an OADDR, the level goes down by one. As long as we're on a level > 0,
// finding an OADDR just means we're following the upstream of a dereference,
// so this address doesn't leak (yet).
// If level == 0, it means the /value/ of this node can reach the root of this flood,
// so if this node is an OADDR, its argument should be marked as escaping iff
// its curfn/e.loopdepth are different from the flood's root.
// Once an object has been moved to the heap, all of its upstream should be considered
// escaping to the global scope.
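//
// For example (illustrative): for sink = &x the flood starts at the sink
// with level 0, the OADDR takes the level to -1, and x is marked as
// escaping (its loopdepth differs from the sink's). For sink = p.left
// (an ODOTPTR) the level rises to 1 before reaching the parameter p, so
// only the content reachable through p, not p itself, is recorded as
// leaking.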
1823 func (e *EscState) escflood(dst *Node) { 1824 switch dst.Op { 1825 case ONAME, OCLOSURE: 1826 default: 1827 return 1828 } 1829 1830 dstE := e.nodeEscState(dst) 1831 if Debug['m'] > 2 { 1832 fmt.Printf("\nescflood:%d: dst %S scope:%v[%d]\n", e.walkgen, dst, e.curfnSym(dst), dstE.Loopdepth) 1833 } 1834 1835 for i := range dstE.Flowsrc { 1836 e.walkgen++ 1837 s := &dstE.Flowsrc[i] 1838 s.parent = nil 1839 e.escwalk(levelFrom(0), dst, s.src, s) 1840 } 1841 } 1842 1843 // funcOutputAndInput reports whether dst and src correspond to output and input parameters of the same function. 1844 func funcOutputAndInput(dst, src *Node) bool { 1845 // Note if dst is marked as escaping, then "returned" is too weak. 1846 return dst.Op == ONAME && dst.Class() == PPARAMOUT && 1847 src.Op == ONAME && src.Class() == PPARAM && src.Name.Curfn == dst.Name.Curfn 1848 } 1849 1850 func (es *EscStep) describe(src *Node) { 1851 if Debug['m'] < 2 { 1852 return 1853 } 1854 step0 := es 1855 for step := step0; step != nil && !step.busy; step = step.parent { 1856 // TODO: We get cycles. Trigger is i = &i (where var i interface{}) 1857 step.busy = true 1858 // The trail is a little odd because of how the 1859 // graph is constructed. The link to the current 1860 // Node is parent.src unless parent is nil in which 1861 // case it is step.dst. 1862 nextDest := step.parent 1863 dst := step.dst 1864 where := step.where 1865 if nextDest != nil { 1866 dst = nextDest.src 1867 } 1868 if where == nil { 1869 where = dst 1870 } 1871 Warnl(src.Pos, "\tfrom %v (%s) at %s", dst, step.why, where.Line()) 1872 } 1873 for step := step0; step != nil && step.busy; step = step.parent { 1874 step.busy = false 1875 } 1876 } 1877 1878 const NOTALOOPDEPTH = -1 1879 1880 func (e *EscState) escwalk(level Level, dst *Node, src *Node, step *EscStep) { 1881 e.escwalkBody(level, dst, src, step, NOTALOOPDEPTH) 1882 } 1883 1884 func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) { 1885 if src.Op == OLITERAL { 1886 return 1887 } 1888 srcE := e.nodeEscState(src) 1889 if srcE.Walkgen == e.walkgen { 1890 // Esclevels are vectors, do not compare as integers, 1891 // and must use "min" of old and new to guarantee 1892 // convergence. 1893 level = level.min(srcE.Level) 1894 if level == srcE.Level { 1895 // Have we been here already with an extraloopdepth, 1896 // or is the extraloopdepth provided no improvement on 1897 // what's already been seen? 1898 if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth { 1899 return 1900 } 1901 srcE.Maxextraloopdepth = extraloopdepth 1902 } 1903 } else { // srcE.Walkgen < e.walkgen -- first time, reset this. 1904 srcE.Maxextraloopdepth = NOTALOOPDEPTH 1905 } 1906 1907 srcE.Walkgen = e.walkgen 1908 srcE.Level = level 1909 modSrcLoopdepth := srcE.Loopdepth 1910 1911 if extraloopdepth > modSrcLoopdepth { 1912 modSrcLoopdepth = extraloopdepth 1913 } 1914 1915 if Debug['m'] > 2 { 1916 fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %S(%0j) scope:%v[%d] extraloopdepth=%v\n", 1917 level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", src.Op, src, src, e.curfnSym(src), srcE.Loopdepth, extraloopdepth) 1918 } 1919 1920 e.pdepth++ 1921 1922 // Input parameter flowing to output parameter? 1923 var leaks bool 1924 var osrcesc uint16 // used to prevent duplicate error messages 1925 1926 dstE := e.nodeEscState(dst) 1927 if funcOutputAndInput(dst, src) && src.Esc&EscMask < EscHeap && dst.Esc != EscHeap { 1928 // This case handles: 1929 // 1. return in 1930 // 2. 
return &in 1931 // 3. tmp := in; return &tmp 1932 // 4. return *in 1933 if Debug['m'] != 0 { 1934 if Debug['m'] <= 2 { 1935 Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level.int()) 1936 step.describe(src) 1937 } else { 1938 Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level) 1939 } 1940 } 1941 if src.Esc&EscMask != EscReturn { 1942 src.Esc = EscReturn | src.Esc&EscContentEscapes 1943 } 1944 src.Esc = escNoteOutputParamFlow(src.Esc, dst.Name.Vargen, level) 1945 goto recurse 1946 } 1947 1948 // If parameter content escapes to heap, set EscContentEscapes 1949 // Note minor confusion around escape from pointer-to-struct vs escape from struct 1950 if dst.Esc == EscHeap && 1951 src.Op == ONAME && src.Class() == PPARAM && src.Esc&EscMask < EscHeap && 1952 level.int() > 0 { 1953 src.Esc = escMax(EscContentEscapes|src.Esc, EscNone) 1954 if Debug['m'] != 0 { 1955 Warnl(src.Pos, "mark escaped content: %S", src) 1956 step.describe(src) 1957 } 1958 } 1959 1960 leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Loopdepth < modSrcLoopdepth 1961 leaks = leaks || level.int() <= 0 && dst.Esc&EscMask == EscHeap 1962 1963 osrcesc = src.Esc 1964 switch src.Op { 1965 case ONAME: 1966 if src.Class() == PPARAM && (leaks || dstE.Loopdepth < 0) && src.Esc&EscMask < EscHeap { 1967 if level.guaranteedDereference() > 0 { 1968 src.Esc = escMax(EscContentEscapes|src.Esc, EscNone) 1969 if Debug['m'] != 0 { 1970 if Debug['m'] <= 2 { 1971 if osrcesc != src.Esc { 1972 Warnl(src.Pos, "leaking param content: %S", src) 1973 step.describe(src) 1974 } 1975 } else { 1976 Warnl(src.Pos, "leaking param content: %S level=%v dst.eld=%v src.eld=%v dst=%S", 1977 src, level, dstE.Loopdepth, modSrcLoopdepth, dst) 1978 } 1979 } 1980 } else { 1981 src.Esc = EscHeap 1982 if Debug['m'] != 0 { 1983 if Debug['m'] <= 2 { 1984 Warnl(src.Pos, "leaking param: %S", src) 1985 step.describe(src) 1986 } else { 1987 Warnl(src.Pos, "leaking param: %S level=%v dst.eld=%v src.eld=%v dst=%S", 1988 src, level, dstE.Loopdepth, modSrcLoopdepth, dst) 1989 } 1990 } 1991 } 1992 } 1993 1994 // Treat a captured closure variable as equivalent to the 1995 // original variable. 
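// For example (hedged, invented user code, not from this file): if the closure
// below escapes, the captured reference is walked as if it were x itself, so x
// is moved to the heap in the enclosing function.
//
//	var sink func()
//
//	func g() {
//		x := 0
//		sink = func() { x++ } // closure escapes via sink; the captured x escapes with it
//	}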
1996 if src.IsClosureVar() { 1997 if leaks && Debug['m'] != 0 { 1998 Warnl(src.Pos, "leaking closure reference %S", src) 1999 step.describe(src) 2000 } 2001 e.escwalk(level, dst, src.Name.Defn, e.stepWalk(dst, src.Name.Defn, "closure-var", step)) 2002 } 2003 2004 case OPTRLIT, OADDR: 2005 why := "pointer literal" 2006 if src.Op == OADDR { 2007 why = "address-of" 2008 } 2009 if leaks { 2010 src.Esc = EscHeap 2011 if Debug['m'] != 0 && osrcesc != src.Esc { 2012 p := src 2013 if p.Left.Op == OCLOSURE { 2014 p = p.Left // merely to satisfy error messages in tests 2015 } 2016 if Debug['m'] > 2 { 2017 Warnl(src.Pos, "%S escapes to heap, level=%v, dst=%v dst.eld=%v, src.eld=%v", 2018 p, level, dst, dstE.Loopdepth, modSrcLoopdepth) 2019 } else { 2020 Warnl(src.Pos, "%S escapes to heap", p) 2021 step.describe(src) 2022 } 2023 } 2024 addrescapes(src.Left) 2025 e.escwalkBody(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step), modSrcLoopdepth) 2026 extraloopdepth = modSrcLoopdepth // passes to recursive case, seems likely a no-op 2027 } else { 2028 e.escwalk(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step)) 2029 } 2030 2031 case OAPPEND: 2032 e.escwalk(level, dst, src.List.First(), e.stepWalk(dst, src.List.First(), "append-first-arg", step)) 2033 2034 case ODDDARG: 2035 if leaks { 2036 src.Esc = EscHeap 2037 if Debug['m'] != 0 && osrcesc != src.Esc { 2038 Warnl(src.Pos, "%S escapes to heap", src) 2039 step.describe(src) 2040 } 2041 extraloopdepth = modSrcLoopdepth 2042 } 2043 // similar to a slice arraylit and its args. 2044 level = level.dec() 2045 2046 case OSLICELIT: 2047 for _, elt := range src.List.Slice() { 2048 if elt.Op == OKEY { 2049 elt = elt.Right 2050 } 2051 e.escwalk(level.dec(), dst, elt, e.stepWalk(dst, elt, "slice-literal-element", step)) 2052 } 2053 2054 fallthrough 2055 2056 case OMAKECHAN, 2057 OMAKEMAP, 2058 OMAKESLICE, 2059 OARRAYRUNESTR, 2060 OARRAYBYTESTR, 2061 OSTRARRAYRUNE, 2062 OSTRARRAYBYTE, 2063 OADDSTR, 2064 OMAPLIT, 2065 ONEW, 2066 OCLOSURE, 2067 OCALLPART, 2068 ORUNESTR, 2069 OCONVIFACE: 2070 if leaks { 2071 src.Esc = EscHeap 2072 if Debug['m'] != 0 && osrcesc != src.Esc { 2073 Warnl(src.Pos, "%S escapes to heap", src) 2074 step.describe(src) 2075 } 2076 extraloopdepth = modSrcLoopdepth 2077 } 2078 2079 case ODOT, 2080 ODOTTYPE: 2081 e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "dot", step)) 2082 2083 case 2084 OSLICE, 2085 OSLICEARR, 2086 OSLICE3, 2087 OSLICE3ARR, 2088 OSLICESTR: 2089 e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "slice", step)) 2090 2091 case OINDEX: 2092 if src.Left.Type.IsArray() { 2093 e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "fixed-array-index-of", step)) 2094 break 2095 } 2096 fallthrough 2097 2098 case ODOTPTR: 2099 e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "dot of pointer", step)) 2100 case OINDEXMAP: 2101 e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "map index", step)) 2102 case OIND: 2103 e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "indirection", step)) 2104 2105 // In this case a link went directly to a call, but should really go 2106 // to the dummy .outN outputs that were created for the call that 2107 // themselves link to the inputs with levels adjusted. 2108 // See e.g. #10466 2109 // This can only happen with functions returning a single result. 
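// A hedged sketch of such a flow (invented names, not from this file):
//
//	func newInt() *int { return new(int) }
//
//	var sink *int
//
//	func h() { sink = newInt() } // if the edge from sink points at the call itself,
//	                             // it is redirected below to the call's single dummy
//	                             // output, whose own edges carry the leveled inputs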
2110 case OCALLMETH, OCALLFUNC, OCALLINTER: 2111 if srcE.Retval.Len() != 0 { 2112 if Debug['m'] > 2 { 2113 fmt.Printf("%v:[%d] dst %S escwalk replace src: %S with %S\n", 2114 linestr(lineno), e.loopdepth, 2115 dst, src, srcE.Retval.First()) 2116 } 2117 src = srcE.Retval.First() 2118 srcE = e.nodeEscState(src) 2119 } 2120 } 2121 2122 recurse: 2123 level = level.copy() 2124 2125 for i := range srcE.Flowsrc { 2126 s := &srcE.Flowsrc[i] 2127 s.parent = step 2128 e.escwalkBody(level, dst, s.src, s, extraloopdepth) 2129 s.parent = nil 2130 } 2131 2132 e.pdepth-- 2133 } 2134 2135 // addrescapes tags node n as having had its address taken 2136 // by "increasing" the "value" of n.Esc to EscHeap. 2137 // Storage is allocated as necessary to allow the address 2138 // to be taken. 2139 func addrescapes(n *Node) { 2140 switch n.Op { 2141 default: 2142 // Unexpected Op, probably due to a previous type error. Ignore. 2143 2144 case OIND, ODOTPTR: 2145 // Nothing to do. 2146 2147 case ONAME: 2148 if n == nodfp { 2149 break 2150 } 2151 2152 // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. 2153 // on PPARAM it means something different. 2154 if n.Class() == PAUTO && n.Esc == EscNever { 2155 break 2156 } 2157 2158 // If a closure reference escapes, mark the outer variable as escaping. 2159 if n.IsClosureVar() { 2160 addrescapes(n.Name.Defn) 2161 break 2162 } 2163 2164 if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO { 2165 break 2166 } 2167 2168 // This is a plain parameter or local variable that needs to move to the heap, 2169 // but possibly for the function outside the one we're compiling. 2170 // That is, if we have: 2171 // 2172 // func f(x int) { 2173 // func() { 2174 // global = &x 2175 // } 2176 // } 2177 // 2178 // then we're analyzing the inner closure but we need to move x to the 2179 // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. 2180 oldfn := Curfn 2181 Curfn = n.Name.Curfn 2182 if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE { 2183 Curfn = Curfn.Func.Closure 2184 } 2185 ln := lineno 2186 lineno = Curfn.Pos 2187 moveToHeap(n) 2188 Curfn = oldfn 2189 lineno = ln 2190 2191 // ODOTPTR has already been introduced, 2192 // so these are the non-pointer ODOT and OINDEX. 2193 // In &x[0], if x is a slice, then x does not 2194 // escape--the pointer inside x does, but that 2195 // is always a heap pointer anyway. 2196 case ODOT, OINDEX, OPAREN, OCONVNOP: 2197 if !n.Left.Type.IsSlice() { 2198 addrescapes(n.Left) 2199 } 2200 } 2201 } 2202 2203 // moveToHeap records the parameter or local variable n as moved to the heap. 2204 func moveToHeap(n *Node) { 2205 if Debug['r'] != 0 { 2206 Dump("MOVE", n) 2207 } 2208 if compiling_runtime { 2209 yyerror("%v escapes to heap, not allowed in runtime.", n) 2210 } 2211 if n.Class() == PAUTOHEAP { 2212 Dump("n", n) 2213 Fatalf("double move to heap") 2214 } 2215 2216 // Allocate a local stack variable to hold the pointer to the heap copy. 2217 // temp will add it to the function declaration list automatically. 2218 heapaddr := temp(types.NewPtr(n.Type)) 2219 heapaddr.Sym = lookup("&" + n.Sym.Name) 2220 heapaddr.Orig.Sym = heapaddr.Sym 2221 heapaddr.Pos = n.Pos 2222 2223 // Unset AutoTemp to persist the &foo variable name through SSA to 2224 // liveness analysis. 2225 // TODO(mdempsky/drchase): Cleaner solution? 
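// A hedged illustration of the overall effect (invented user code; the -m text
// is approximate):
//
//	func f(x int) *int { return &x } // -m reports roughly "moved to heap: x"; x becomes
//	                                 // a PAUTOHEAP pseudo-variable reached through the
//	                                 // generated "&x" heapaddr, while the stackcopy keeps
//	                                 // the incoming parameter slot for liveness analysis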
2226 heapaddr.Name.SetAutoTemp(false) 2227 2228 // Parameters have a local stack copy used at function start/end 2229 // in addition to the copy in the heap that may live longer than 2230 // the function. 2231 if n.Class() == PPARAM || n.Class() == PPARAMOUT { 2232 if n.Xoffset == BADWIDTH { 2233 Fatalf("addrescapes before param assignment") 2234 } 2235 2236 // We rewrite n below to be a heap variable (indirection of heapaddr). 2237 // Preserve a copy so we can still write code referring to the original, 2238 // and substitute that copy into the function declaration list 2239 // so that analyses of the local (on-stack) variables use it. 2240 stackcopy := newname(n.Sym) 2241 stackcopy.SetAddable(false) 2242 stackcopy.Type = n.Type 2243 stackcopy.Xoffset = n.Xoffset 2244 stackcopy.SetClass(n.Class()) 2245 stackcopy.Name.Param.Heapaddr = heapaddr 2246 if n.Class() == PPARAMOUT { 2247 // Make sure the pointer to the heap copy is kept live throughout the function. 2248 // The function could panic at any point, and then a defer could recover. 2249 // Thus, we need the pointer to the heap copy always available so the 2250 // post-deferreturn code can copy the return value back to the stack. 2251 // See issue 16095. 2252 heapaddr.SetIsOutputParamHeapAddr(true) 2253 } 2254 n.Name.Param.Stackcopy = stackcopy 2255 2256 // Substitute the stackcopy into the function variable list so that 2257 // liveness and other analyses use the underlying stack slot 2258 // and not the now-pseudo-variable n. 2259 found := false 2260 for i, d := range Curfn.Func.Dcl { 2261 if d == n { 2262 Curfn.Func.Dcl[i] = stackcopy 2263 found = true 2264 break 2265 } 2266 // Parameters are before locals, so can stop early. 2267 // This limits the search even in functions with many local variables. 2268 if d.Class() == PAUTO { 2269 break 2270 } 2271 } 2272 if !found { 2273 Fatalf("cannot find %v in local variable list", n) 2274 } 2275 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 2276 } 2277 2278 // Modify n in place so that uses of n now mean indirection of the heapaddr. 2279 n.SetClass(PAUTOHEAP) 2280 n.Xoffset = 0 2281 n.Name.Param.Heapaddr = heapaddr 2282 n.Esc = EscHeap 2283 if Debug['m'] != 0 { 2284 fmt.Printf("%v: moved to heap: %v\n", n.Line(), n) 2285 } 2286 } 2287 2288 // This special tag is applied to uintptr variables 2289 // that we believe may hold unsafe.Pointers for 2290 // calls into assembly functions. 2291 const unsafeUintptrTag = "unsafe-uintptr" 2292 2293 // This special tag is applied to uintptr parameters of functions 2294 // marked go:uintptrescapes. 2295 const uintptrEscapesTag = "uintptr-escapes" 2296 2297 func (e *EscState) esctag(fn *Node) { 2298 fn.Esc = EscFuncTagged 2299 2300 name := func(s *types.Sym, narg int) string { 2301 if s != nil { 2302 return s.Name 2303 } 2304 return fmt.Sprintf("arg#%d", narg) 2305 } 2306 2307 // External functions are assumed unsafe, 2308 // unless //go:noescape is given before the declaration. 2309 if fn.Nbody.Len() == 0 { 2310 if fn.Noescape() { 2311 for _, f := range fn.Type.Params().Fields().Slice() { 2312 if types.Haspointers(f.Type) { 2313 f.Note = mktag(EscNone) 2314 } 2315 } 2316 } 2317 2318 // Assume that uintptr arguments must be held live across the call. 2319 // This is most important for syscall.Syscall. 2320 // See golang.org/issue/13372. 2321 // This really doesn't have much to do with escape analysis per se, 2322 // but we are reusing the ability to annotate an individual function 2323 // argument and pass those annotations along to importing code. 
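// For instance (hedged; these declarations are illustrative, not from this package):
//
//	//go:noescape
//	func indexByte(b []byte, c byte) int // body-less: its pointer-carrying params are
//	                                     // tagged EscNone instead of being assumed unsafe
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) // body-less: each
//	                                     // uintptr param gets the "unsafe-uintptr" tag so
//	                                     // callers keep its referent alive across the call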
2324 narg := 0 2325 for _, f := range fn.Type.Params().Fields().Slice() { 2326 narg++ 2327 if f.Type.Etype == TUINTPTR { 2328 if Debug['m'] != 0 { 2329 Warnl(fn.Pos, "%v assuming %v is unsafe uintptr", funcSym(fn), name(f.Sym, narg)) 2330 } 2331 f.Note = unsafeUintptrTag 2332 } 2333 } 2334 2335 return 2336 } 2337 2338 if fn.Func.Pragma&UintptrEscapes != 0 { 2339 narg := 0 2340 for _, f := range fn.Type.Params().Fields().Slice() { 2341 narg++ 2342 if f.Type.Etype == TUINTPTR { 2343 if Debug['m'] != 0 { 2344 Warnl(fn.Pos, "%v marking %v as escaping uintptr", funcSym(fn), name(f.Sym, narg)) 2345 } 2346 f.Note = uintptrEscapesTag 2347 } 2348 2349 if f.Isddd() && f.Type.Elem().Etype == TUINTPTR { 2350 // final argument is ...uintptr. 2351 if Debug['m'] != 0 { 2352 Warnl(fn.Pos, "%v marking %v as escaping ...uintptr", funcSym(fn), name(f.Sym, narg)) 2353 } 2354 f.Note = uintptrEscapesTag 2355 } 2356 } 2357 } 2358 2359 for _, fs := range types.RecvsParams { 2360 for _, f := range fs(fn.Type).Fields().Slice() { 2361 if !types.Haspointers(f.Type) { // don't bother tagging for scalars 2362 continue 2363 } 2364 if f.Note == uintptrEscapesTag { 2365 // Note is already set in the loop above. 2366 continue 2367 } 2368 2369 // Unnamed parameters are unused and therefore do not escape. 2370 if f.Sym == nil || f.Sym.IsBlank() { 2371 f.Note = mktag(EscNone) 2372 continue 2373 } 2374 2375 switch esc := asNode(f.Nname).Esc; esc & EscMask { 2376 case EscNone, // not touched by escflood 2377 EscReturn: 2378 f.Note = mktag(int(esc)) 2379 2380 case EscHeap: // touched by escflood, moved to heap 2381 } 2382 } 2383 } 2384 }
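// End-to-end, the notes written by esctag are what importing packages consume.
// A hedged example of how the analysis surfaces to users (invented package; the
// -m diagnostic text is approximate):
//
//	package p
//
//	func Leak(p *int) *int { return p } // go build -gcflags=-m prints, roughly,
//	                                    // "leaking param: p to result ~r1 level=0",
//	                                    // and p's parameter Note carries the matching tag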