github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/cmd/compile/internal/gc/esc.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"fmt"
	"strconv"
	"strings"
)

// Run analysis on minimal sets of mutually recursive functions
// or single non-recursive functions, bottom up.
//
// Finding these sets is finding strongly connected components
// by reverse topological order in the static call graph.
// The algorithm (known as Tarjan's algorithm) for doing that is taken from
// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
//
// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
// root of a connected component. Refusing to use it as a root
// forces it into the component of the function in which it appears.
// This is more convenient for escape analysis.
//
// Second, each function becomes two virtual nodes in the graph,
// with numbers n and n+1. We record the function's node number as n
// but search from node n+1. If the search tells us that the component
// number (min) is n+1, we know that this is a trivial component: one function
// plus its closures. If the search tells us that the component number is
// n, then there was a path from node n+1 back to node n, meaning that
// the function set is mutually recursive. The escape analysis can be
// more precise when analyzing a single non-recursive function than
// when analyzing a set of mutually recursive functions.

type bottomUpVisitor struct {
	analyze  func([]*Node, bool)
	visitgen uint32
	nodeID   map[*Node]uint32
	stack    []*Node
}

// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
// on the list or functions that have been passed in previous invocations of
// analyze. Closures appear in the same list as their outer functions.
// The lists are as short as possible while preserving those requirements.
// (In a typical program, many invocations of analyze will be passed just
// a single function.) The boolean argument 'recursive' passed to analyze
// specifies whether the functions on the list are mutually recursive.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
	var v bottomUpVisitor
	v.analyze = analyze
	v.nodeID = make(map[*Node]uint32)
	for _, n := range list {
		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure() {
			v.visit(n)
		}
	}
}

func (v *bottomUpVisitor) visit(n *Node) uint32 {
	if id := v.nodeID[n]; id > 0 {
		// already visited
		return id
	}

	v.visitgen++
	id := v.visitgen
	v.nodeID[n] = id
	v.visitgen++
	min := v.visitgen

	v.stack = append(v.stack, n)
	min = v.visitcodelist(n.Nbody, min)
	if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
		// This node is the root of a strongly connected component.

		// The original min passed to visitcodelist was v.nodeID[n]+1.
		// If visitcodelist found its way back to v.nodeID[n], then this
		// block is a set of mutually recursive functions.
		// Otherwise it's just a lone function that does not recurse.
		recursive := min == id

		// Remove connected component from stack.
		// Mark walkgen so that future visits return a large number
		// so as not to affect the caller's min.

		var i int
		for i = len(v.stack) - 1; i >= 0; i-- {
			x := v.stack[i]
			if x == n {
				break
			}
			v.nodeID[x] = ^uint32(0)
		}
		v.nodeID[n] = ^uint32(0)
		block := v.stack[i:]
		// Run escape analysis on this set of functions.
		v.stack = v.stack[:i]
		v.analyze(block, recursive)
	}

	return min
}

func (v *bottomUpVisitor) visitcodelist(l Nodes, min uint32) uint32 {
	for _, n := range l.Slice() {
		min = v.visitcode(n, min)
	}
	return min
}

func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
	if n == nil {
		return min
	}

	min = v.visitcodelist(n.Ninit, min)
	min = v.visitcode(n.Left, min)
	min = v.visitcode(n.Right, min)
	min = v.visitcodelist(n.List, min)
	min = v.visitcodelist(n.Nbody, min)
	min = v.visitcodelist(n.Rlist, min)

	switch n.Op {
	case OCALLFUNC, OCALLMETH:
		fn := asNode(n.Left.Type.Nname())
		if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil {
			m := v.visit(fn.Name.Defn)
			if m < min {
				min = m
			}
		}

	case OCLOSURE:
		m := v.visit(n.Func.Closure)
		if m < min {
			min = m
		}
	}

	return min
}

// Escape analysis.

// An escape analysis pass for a set of functions. The
// analysis assumes that closures and the functions in which
// they appear are analyzed together, so that the aliasing
// between their variables can be modeled more precisely.
//
// First escfunc, esc and escassign recurse over the ast of
// each function to dig out flow(dst,src) edges between any
// pointer-containing nodes and store those edges in
// e.nodeEscState(dst).Flowsrc. For values assigned to a
// variable in an outer scope or used as a return value,
// they store a flow(theSink, src) edge to a fake node 'the
// Sink'. For variables referenced in closures, an edge
// flow(closure, &var) is recorded and the flow of a closure
// itself to an outer scope is tracked the same way as other
// variables.
//
// Then escflood walks the graph in destination-to-source
// order, starting at theSink, propagating a computed
// "escape level", and tags as escaping values it can
// reach that are either & (address-taken) nodes or new(T),
// and tags pointer-typed or pointer-containing function
// parameters it can reach as leaking.
//
// If a value's address is taken but the address does not escape,
// then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals.

func escapes(all []*Node) {
	visitBottomUp(all, escAnalyze)
}

const (
	EscFuncUnknown = 0 + iota
	EscFuncPlanned
	EscFuncStarted
	EscFuncTagged
)

// A Level encodes the reference state and context applied to
// (stack, heap) allocated memory.
//
// value is the overall sum of *(1) and &(-1) operations encountered
// along a path from a destination (sink, return value) to a source
// (allocation, parameter).
//
// suffixValue is the maximum-copy-started-suffix-level on
// a flow path from a sink/destination. That is, a value
// with suffixValue N is guaranteed to be dereferenced at least
// N deep (chained applications of DOTPTR or IND or INDEX)
// before the result is assigned to a sink.
//
// For example, suppose x is a pointer to T, declared type T struct { left, right *T }
// sink = x.left.left --> level(x)=2, x is reached via two dereferences (DOTPTR) and does not escape to sink.
// sink = &T{right:x} --> level(x)=-1, x is accessible from sink via one "address of"
// sink = &T{right:&T{right:x}} --> level(x)=-2, x is accessible from sink via two "address of"
//
// However, in the next example x's level value and suffixValue differ:
// sink = &T{right:&T{right:x.left}} --> level(x).value=-1, level(x).suffixValue=1
// The positive suffixValue indicates that x is NOT accessible
// from sink. Without a separate suffixValue to capture this, x would
// appear to escape because its "value" would be -1. (The copy
// operations are sometimes implicit in the source code; in this case,
// the value of x.left was copied into a field of a newly allocated T.)
//
// Each node's level (value and suffixValue) is the maximum for
// all flow paths from (any) sink to that node.

// There's one of these for each Node, and the integer values
// rarely exceed even what can be stored in 4 bits, never mind 8.
type Level struct {
	value, suffixValue int8
}

// There are loops in the escape graph,
// causing arbitrary recursion into deeper and deeper
// levels. Cut this off safely by making minLevel sticky:
// once you get that deep, you cannot go down any further
// but you also cannot go up any further. This is a
// conservative fix. Making minLevel smaller (more negative)
// would handle more complex chains of indirections followed
// by address-of operations, at the cost of repeating the
// traversal once for each additional allowed level when a
// loop is encountered. Using -2 suffices to pass all the
// tests we have written so far, which we assume matches the
// level of complexity we want the escape analysis code to
// handle.
const MinLevel = -2

func (l Level) int() int {
	return int(l.value)
}

func levelFrom(i int) Level {
	if i <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: int8(i)}
}

func satInc8(x int8) int8 {
	if x == 127 {
		return 127
	}
	return x + 1
}

func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}

// inc returns the level l + 1, representing the effect of an indirect (*) operation.
func (l Level) inc() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: satInc8(l.value), suffixValue: satInc8(l.suffixValue)}
}

// dec returns the level l - 1, representing the effect of an address-of (&) operation.
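// Like inc, dec never goes below MinLevel: once value has reached MinLevel it
// stays there (see the comment above MinLevel for why the cutoff is sticky).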
func (l Level) dec() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
}

// copy returns the level for a copy of a value with level l.
// The resulting suffixValue is at least zero, or larger if it was already larger.
func (l Level) copy() Level {
	return Level{value: l.value, suffixValue: max8(l.suffixValue, 0)}
}

func (l1 Level) min(l2 Level) Level {
	return Level{
		value:       min8(l1.value, l2.value),
		suffixValue: min8(l1.suffixValue, l2.suffixValue)}
}

// guaranteedDereference returns the number of dereferences
// applied to a pointer before addresses are taken/generated.
// This is the maximum level computed from path suffixes starting
// with copies where paths flow from destination to source.
func (l Level) guaranteedDereference() int {
	return int(l.suffixValue)
}

// An EscStep documents one step in the path from memory
// that is heap allocated to the (alleged) reason for the
// heap allocation.
type EscStep struct {
	src, dst *Node    // the endpoints of this edge in the escape-to-heap chain.
	where    *Node    // sometimes the endpoints don't match source locations; set 'where' to make that right
	parent   *EscStep // used in flood to record path
	why      string   // explanation for this step in the escape-to-heap chain
	busy     bool     // used in prevent to snip cycles.
}

type NodeEscState struct {
	Curfn             *Node
	Flowsrc           []EscStep // flow(this, src)
	Retval            Nodes     // on OCALLxxx, list of dummy return values
	Loopdepth         int32     // -1: global, 0: return variables, 1: function top level, increased inside function for every loop or label to mark scopes
	Level             Level
	Walkgen           uint32
	Maxextraloopdepth int32
}

func (e *EscState) nodeEscState(n *Node) *NodeEscState {
	if nE, ok := n.Opt().(*NodeEscState); ok {
		return nE
	}
	if n.Opt() != nil {
		Fatalf("nodeEscState: opt in use (%T)", n.Opt())
	}
	nE := &NodeEscState{
		Curfn: Curfn,
	}
	n.SetOpt(nE)
	e.opts = append(e.opts, n)
	return nE
}

func (e *EscState) track(n *Node) {
	if Curfn == nil {
		Fatalf("EscState.track: Curfn nil")
	}
	n.Esc = EscNone // until proven otherwise
	nE := e.nodeEscState(n)
	nE.Loopdepth = e.loopdepth
	e.noesc = append(e.noesc, n)
}

// Escape constants are numbered in order of increasing "escapiness"
// to help make inferences be monotonic. With the exception of
// EscNever which is sticky, eX < eY means that eY is more exposed
// than eX, and hence replaces it in a conservative analysis.
const (
	EscUnknown = iota
	EscNone    // Does not escape to heap, result, or parameters.
	EscReturn  // Is returned or reachable from returned.
	EscHeap    // Reachable from the heap
	EscNever   // By construction will not escape.
	EscBits            = 3
	EscMask            = (1 << EscBits) - 1
	EscContentEscapes  = 1 << EscBits // value obtained by indirect of parameter escapes to heap
	EscReturnBits      = EscBits + 1
	// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
)

// escMax returns the maximum of an existing escape value
// (and its additional parameter flow flags) and a new escape type.
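// When the result is EscNone or EscReturn, any parameter-flow bits already
// recorded in e are preserved.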
func escMax(e, etype uint16) uint16 {
	if e&EscMask >= EscHeap {
		// normalize
		if e&^EscMask != 0 {
			Fatalf("Escape information had unexpected return encoding bits (w/ EscHeap, EscNever), e&EscMask=%v", e&EscMask)
		}
	}
	if e&EscMask > etype {
		return e
	}
	if etype == EscNone || etype == EscReturn {
		return (e &^ EscMask) | etype
	}
	return etype
}

// For each input parameter to a function, the escapeReturnEncoding describes
// how the parameter may leak to the function's outputs. This is currently the
// "level" of the leak where level is 0 or larger (negative level means stored into
// something whose address is returned -- but that implies stored into the heap,
// hence EscHeap, which means that the details are not currently relevant.)
const (
	bitsPerOutputInTag = 3                                 // For each output, the number of bits for a tag
	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
	maxEncodedLevel    = int(bitsMaskForTag - 1)           // The largest level that can be stored in a tag.
)

type EscState struct {
	// Fake node that all
	//   - return values and output variables
	//   - parameters on imported functions not marked 'safe'
	//   - assignments to global variables
	// flow to.
	theSink Node

	dsts      []*Node // all dst nodes
	loopdepth int32   // for detecting nested loop scopes
	pdepth    int     // for debug printing in recursions.
	dstcount  int     // diagnostic
	edgecount int     // diagnostic
	noesc     []*Node // list of possible non-escaping nodes, for printing
	recursive bool    // recursive function or group of mutually recursive functions.
	opts      []*Node // nodes with .Opt initialized
	walkgen   uint32
}

func newEscState(recursive bool) *EscState {
	e := new(EscState)
	e.theSink.Op = ONAME
	e.theSink.Orig = &e.theSink
	e.theSink.SetClass(PEXTERN)
	e.theSink.Sym = lookup(".sink")
	e.nodeEscState(&e.theSink).Loopdepth = -1
	e.recursive = recursive
	return e
}

func (e *EscState) stepWalk(dst, src *Node, why string, parent *EscStep) *EscStep {
	// TODO: keep a cache of these, mark entry/exit in escwalk to avoid allocation
	// Or perhaps never mind, since it is disabled unless printing is on.
	// We may want to revisit this, since the EscStep nodes would make
	// an excellent replacement for the poorly-separated graph-build/graph-flood
	// stages.
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, parent: parent}
}

func (e *EscState) stepAssign(step *EscStep, dst, src *Node, why string) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	if step != nil { // Caller may have known better.
		if step.why == "" {
			step.why = why
		}
		if step.dst == nil {
			step.dst = dst
		}
		if step.src == nil {
			step.src = src
		}
		return step
	}
	return &EscStep{src: src, dst: dst, why: why}
}

func (e *EscState) stepAssignWhere(dst, src *Node, why string, where *Node) *EscStep {
	if Debug['m'] == 0 {
		return nil
	}
	return &EscStep{src: src, dst: dst, why: why, where: where}
}

// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *Node) *types.Sym {
	if fn == nil || fn.Func.Nname == nil {
		return nil
	}
	return fn.Func.Nname.Sym
}

// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
func (e *EscState) curfnSym(n *Node) *types.Sym {
	nE := e.nodeEscState(n)
	return funcSym(nE.Curfn)
}

func escAnalyze(all []*Node, recursive bool) {
	e := newEscState(recursive)

	for _, n := range all {
		if n.Op == ODCLFUNC {
			n.Esc = EscFuncPlanned
			if Debug['m'] > 3 {
				Dump("escAnalyze", n)
			}

		}
	}

	// flow-analyze functions
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.escfunc(n)
		}
	}

	// visit the upstream of each dst, mark address nodes with
	// addrescapes, mark parameters unsafe
	escapes := make([]uint16, len(e.dsts))
	for i, n := range e.dsts {
		escapes[i] = n.Esc
	}
	for _, n := range e.dsts {
		e.escflood(n)
	}
	for {
		done := true
		for i, n := range e.dsts {
			if n.Esc != escapes[i] {
				done = false
				if Debug['m'] > 2 {
					Warnl(n.Pos, "Reflooding %v %S", e.curfnSym(n), n)
				}
				escapes[i] = n.Esc
				e.escflood(n)
			}
		}
		if done {
			break
		}
	}

	// for all top level functions, tag the typenodes corresponding to the param nodes
	for _, n := range all {
		if n.Op == ODCLFUNC {
			e.esctag(n)
		}
	}

	if Debug['m'] != 0 {
		for _, n := range e.noesc {
			if n.Esc == EscNone {
				Warnl(n.Pos, "%v %S does not escape", e.curfnSym(n), n)
			}
		}
	}

	for _, x := range e.opts {
		x.SetOpt(nil)
	}
}

func (e *EscState) escfunc(fn *Node) {
	if fn.Esc != EscFuncPlanned {
		Fatalf("repeat escfunc %v", fn.Func.Nname)
	}
	fn.Esc = EscFuncStarted

	saveld := e.loopdepth
	e.loopdepth = 1
	savefn := Curfn
	Curfn = fn

	for _, ln := range Curfn.Func.Dcl {
		if ln.Op != ONAME {
			continue
		}
		lnE := e.nodeEscState(ln)
		switch ln.Class() {
		// out params are in a loopdepth between the sink and all local variables
		case PPARAMOUT:
			lnE.Loopdepth = 0

		case PPARAM:
			lnE.Loopdepth = 1
			if ln.Type != nil && !types.Haspointers(ln.Type) {
				break
			}
			if Curfn.Nbody.Len() == 0 && !Curfn.Noescape() {
				ln.Esc = EscHeap
			} else {
				ln.Esc = EscNone // prime for escflood later
			}
			e.noesc = append(e.noesc, ln)
		}
	}

	// in a mutually recursive group we lose track of the return values
	if e.recursive {
		for _, ln := range Curfn.Func.Dcl {
			if ln.Op == ONAME && ln.Class() == PPARAMOUT {
				e.escflows(&e.theSink, ln, e.stepAssign(nil, ln, ln, "returned from recursive function"))
			}
		}
	}

	e.escloopdepthlist(Curfn.Nbody)
	e.esclist(Curfn.Nbody, Curfn)
	Curfn = savefn
	e.loopdepth = saveld
}

// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
// and set it to one of the following two. Then in esc we'll clear it again.
var (
	looping    Node
	nonlooping Node
)

func (e *EscState) escloopdepthlist(l Nodes) {
	for _, n := range l.Slice() {
		e.escloopdepth(n)
	}
}

func (e *EscState) escloopdepth(n *Node) {
	if n == nil {
		return
	}

	e.escloopdepthlist(n.Ninit)

	switch n.Op {
	case OLABEL:
		if n.Sym == nil {
			Fatalf("esc:label without label: %+v", n)
		}

		// Walk will complain about this label being already defined, but that's not until
		// after escape analysis.
		// In the future, maybe pull label & goto analysis out of walk and put it before esc.
		n.Sym.Label = asTypesNode(&nonlooping)

	case OGOTO:
		if n.Sym == nil {
			Fatalf("esc:goto without label: %+v", n)
		}

		// If we come past one that's uninitialized, this must be a (harmless) forward jump,
		// but if it's set to nonlooping the label must have preceded this goto.
		if asNode(n.Sym.Label) == &nonlooping {
			n.Sym.Label = asTypesNode(&looping)
		}
	}

	e.escloopdepth(n.Left)
	e.escloopdepth(n.Right)
	e.escloopdepthlist(n.List)
	e.escloopdepthlist(n.Nbody)
	e.escloopdepthlist(n.Rlist)
}

func (e *EscState) esclist(l Nodes, parent *Node) {
	for _, n := range l.Slice() {
		e.esc(n, parent)
	}
}

func (e *EscState) isSliceSelfAssign(dst, src *Node) bool {
	// Detect the following special case.
	//
	//	func (b *Buffer) Foo() {
	//		n, m := ...
	//		b.buf = b.buf[n:m]
	//	}
	//
	// This assignment is a no-op for escape analysis,
	// it does not store any new pointers into b that were not already there.
	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
	// Here we assume that the statement will not contain calls,
	// that is, that order will move any calls to init.
	// Otherwise base ONAME value could change between the moments
	// when we evaluate it for dst and for src.

	// dst is ONAME dereference.
	if dst.Op != ODEREF && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
		return false
	}
	// src is a slice operation.
	switch src.Op {
	case OSLICE, OSLICE3, OSLICESTR:
		// OK.
	case OSLICEARR, OSLICE3ARR:
		// Since arrays are embedded into containing object,
		// slice of non-pointer array will introduce a new pointer into b that was not already there
		// (pointer to b itself). After such assignment, if b contents escape,
		// b escapes as well. If we ignore such OSLICEARR, we will conclude
		// that b does not escape when b contents do.
		//
		// Pointer to an array is OK since it's not stored inside b directly.
		// For slicing an array (not pointer to array), there is an implicit OADDR.
		// We check that to determine non-pointer array slicing.
		if src.Left.Op == OADDR {
			return false
		}
	default:
		return false
	}
	// slice is applied to ONAME dereference.
	if src.Left.Op != ODEREF && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
		return false
	}
	// dst and src reference the same base ONAME.
	return dst.Left == src.Left.Left
}

// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func (e *EscState) isSelfAssign(dst, src *Node) bool {
	if e.isSliceSelfAssign(dst, src) {
		return true
	}

	// Detect trivial assignments that assign back to the same object.
	//
	// It covers these cases:
	//	val.x = val.y
	//	val.x[i] = val.y[j]
	//	val.x1.x2 = val.x1.y2
	//	... etc
	//
	// These assignments do not change assigned object lifetime.

	if dst == nil || src == nil || dst.Op != src.Op {
		return false
	}

	switch dst.Op {
	case ODOT, ODOTPTR:
		// Safe trailing accessors that are permitted to differ.
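		// (The shared base expression they hang off is compared below via samesafeexpr.)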
	case OINDEX:
		if e.mayAffectMemory(dst.Right) || e.mayAffectMemory(src.Right) {
			return false
		}
	default:
		return false
	}

	// The expression prefix must be both "safe" and identical.
	return samesafeexpr(dst.Left, src.Left)
}

// mayAffectMemory reports whether the evaluation of n may affect the program's
// memory state. If the expression can't affect memory, then it can be safely
// ignored by the escape analysis.
func (e *EscState) mayAffectMemory(n *Node) bool {
	// We may want to use a "memory safe" black list instead of general
	// "side-effect free", which would include all calls and other ops
	// that can allocate or change global state.
	// It's safer to start from a whitelist for now.
	//
	// We're ignoring things like division by zero, index out of range,
	// and nil pointer dereference here.
	switch n.Op {
	case ONAME, OCLOSUREVAR, OLITERAL:
		return false

	// Left+Right group.
	case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
		return e.mayAffectMemory(n.Left) || e.mayAffectMemory(n.Right)

	// Left group.
	case ODOT, ODOTPTR, ODEREF, OCONVNOP, OCONV, OLEN, OCAP,
		ONOT, OBITNOT, OPLUS, ONEG, OALIGNOF, OOFFSETOF, OSIZEOF:
		return e.mayAffectMemory(n.Left)

	default:
		return true
	}
}

func (e *EscState) esc(n *Node, parent *Node) {
	if n == nil {
		return
	}

	lno := setlineno(n)

	// ninit logically runs at a different loopdepth than the rest of the for loop.
	e.esclist(n.Ninit, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth++
	}

	// type switch variables have no ODCL.
	// process type switch as declaration.
	// must happen before processing of switch body,
	// so before recursion.
	if n.Op == OSWITCH && n.Left != nil && n.Left.Op == OTYPESW {
		for _, cas := range n.List.Slice() { // cases
			// it.N().Rlist is the variable per case
			if cas.Rlist.Len() != 0 {
				e.nodeEscState(cas.Rlist.First()).Loopdepth = e.loopdepth
			}
		}
	}

	// Big stuff and non-constant-sized stuff escapes unconditionally.
	// "Big" conditions that were scattered around in walk have been
	// gathered here.
	if n.Esc != EscHeap && n.Type != nil &&
		(n.Type.Width > maxStackVarSize ||
			(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize ||
			n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
		// isSmallMakeSlice returns false for non-constant len/cap.
		// If that's the case, print a more accurate escape reason.
		var msgVerb, escapeMsg string
		if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || !Isconst(n.Right, CTINT)) {
			msgVerb, escapeMsg = "has ", "non-constant size"
		} else {
			msgVerb, escapeMsg = "is ", "too large for stack"
		}

		if Debug['m'] > 2 {
			Warnl(n.Pos, "%v "+msgVerb+escapeMsg, n)
		}
		n.Esc = EscHeap
		addrescapes(n)
		e.escassignSinkWhy(n, n, escapeMsg) // TODO category: tooLarge
	}

	e.esc(n.Left, n)

	if n.Op == ORANGE {
		// ORANGE node's Right is evaluated before the loop
		e.loopdepth--
	}

	e.esc(n.Right, n)

	if n.Op == ORANGE {
		e.loopdepth++
	}

	e.esclist(n.Nbody, n)
	e.esclist(n.List, n)
	e.esclist(n.Rlist, n)

	if n.Op == OFOR || n.Op == OFORUNTIL || n.Op == ORANGE {
		e.loopdepth--
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v esc: %v\n", linestr(lineno), e.loopdepth, funcSym(Curfn), n)
	}

opSwitch:
	switch n.Op {
	// Record loop depth at declaration.
	case ODCL:
		if n.Left != nil {
			e.nodeEscState(n.Left).Loopdepth = e.loopdepth
		}

	case OLABEL:
		switch asNode(n.Sym.Label) {
		case &nonlooping:
			if Debug['m'] > 2 {
				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
			}
		case &looping:
			if Debug['m'] > 2 {
				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
			}
			e.loopdepth++
		}

		n.Sym.Label = nil

	case ORANGE:
		if n.List.Len() >= 2 {
			// Everything but fixed array is a dereference.

			// If fixed array is really the address of fixed array,
			// it is also a dereference, because it is implicitly
			// dereferenced (see #12588)
			if n.Type.IsArray() &&
				!(n.Right.Type.IsPtr() && types.Identical(n.Right.Type.Elem(), n.Type)) {
				e.escassignWhyWhere(n.List.Second(), n.Right, "range", n)
			} else {
				e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n))
			}
		}

	case OSWITCH:
		if n.Left != nil && n.Left.Op == OTYPESW {
			for _, cas := range n.List.Slice() {
				// cases
				// n.Left.Right is the argument of the .(type),
				// it.N().Rlist is the variable per case
				if cas.Rlist.Len() != 0 {
					e.escassignWhyWhere(cas.Rlist.First(), n.Left.Right, "switch case", n)
				}
			}
		}

	case OAS, OASOP:
		// Filter out some no-op assignments for escape analysis.
		if e.isSelfAssign(n.Left, n.Right) {
			if Debug['m'] != 0 {
				Warnl(n.Pos, "%v ignoring self-assignment in %S", e.curfnSym(n), n)
			}
			break
		}

		e.escassign(n.Left, n.Right, e.stepAssignWhere(nil, nil, "", n))

	case OAS2: // x,y = a,b
		if n.List.Len() == n.Rlist.Len() {
			rs := n.Rlist.Slice()
			where := n
			for i, n := range n.List.Slice() {
				e.escassignWhyWhere(n, rs[i], "assign-pair", where)
			}
		}

	case OAS2RECV: // v, ok = <-ch
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-receive", n)
	case OAS2MAPR: // v, ok = m[k]
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-mapr", n)
	case OAS2DOTTYPE: // v, ok = x.(type)
		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-dot-type", n)

	case OSEND: // ch <- x
		e.escassignSinkWhy(n, n.Right, "send")

	case ODEFER:
		if e.loopdepth == 1 { // top level
			break
		}
		// arguments leak out of scope
		// TODO: leak to a dummy node instead
		// defer f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "defer func")
		e.escassignSinkWhy(n, n.Left.Right, "defer func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "defer func arg")
		}

	case OGO:
		// go f(x) - f and x escape
		e.escassignSinkWhy(n, n.Left.Left, "go func")
		e.escassignSinkWhy(n, n.Left.Right, "go func ...") // ODDDARG for call
		for _, arg := range n.Left.List.Slice() {
			e.escassignSinkWhy(n, arg, "go func arg")
		}

	case OCALLMETH, OCALLFUNC, OCALLINTER:
		e.esccall(n, parent)

	// esccall already done on n.Rlist.First()
	// tie its Retval to n.List
	case OAS2FUNC: // x,y = f()
		rs := e.nodeEscState(n.Rlist.First()).Retval.Slice()
		where := n
		for i, n := range n.List.Slice() {
			if i >= len(rs) {
				break
			}
			e.escassignWhyWhere(n, rs[i], "assign-pair-func-call", where)
		}
		if n.List.Len() != len(rs) {
			Fatalf("esc oas2func")
		}

	case ORETURN:
		retList := n.List
		if retList.Len() == 1 && Curfn.Type.NumResults() > 1 {
			// OAS2FUNC in disguise
			// esccall already done on n.List.First()
			// tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's
			retList = e.nodeEscState(n.List.First()).Retval
		}

		i := 0
		for _, lrn := range Curfn.Func.Dcl {
			if i >= retList.Len() {
				break
			}
			if lrn.Op != ONAME || lrn.Class() != PPARAMOUT {
				continue
			}
			e.escassignWhyWhere(lrn, retList.Index(i), "return", n)
			i++
		}

		if i < retList.Len() {
			Fatalf("esc return list")
		}

	// Argument could leak through recover.
	case OPANIC:
		e.escassignSinkWhy(n, n.Left, "panic")

	case OAPPEND:
		if !n.IsDDD() {
			for _, nn := range n.List.Slice()[1:] {
				e.escassignSinkWhy(n, nn, "appended to slice") // lose track of assign to dereference
			}
		} else {
			// append(slice1, slice2...) -- slice2 itself does not escape, but contents do.
			slice2 := n.List.Second()
			e.escassignDereference(&e.theSink, slice2, e.stepAssignWhere(n, slice2, "appended slice...", n)) // lose track of assign of dereference
			if Debug['m'] > 3 {
				Warnl(n.Pos, "%v special treatment of append(slice1, slice2...) %S", e.curfnSym(n), n)
			}
		}
		e.escassignDereference(&e.theSink, n.List.First(), e.stepAssignWhere(n, n.List.First(), "appendee slice", n)) // The original elements are now leaked, too

	case OCOPY:
		e.escassignDereference(&e.theSink, n.Right, e.stepAssignWhere(n, n.Right, "copied slice", n)) // lose track of assign of dereference

	case OCONV, OCONVNOP:
		e.escassignWhyWhere(n, n.Left, "converted", n)

	case OCONVIFACE:
		e.track(n)
		e.escassignWhyWhere(n, n.Left, "interface-converted", n)

	case OARRAYLIT:
		// Link values to array
		for _, elt := range n.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.escassign(n, elt, e.stepAssignWhere(n, elt, "array literal element", n))
		}

	case OSLICELIT:
		// Slice is not leaked until proven otherwise
		e.track(n)
		// Link values to slice
		for _, elt := range n.List.Slice() {
			if elt.Op == OKEY {
				elt = elt.Right
			}
			e.escassign(n, elt, e.stepAssignWhere(n, elt, "slice literal element", n))
		}

	// Link values to struct.
	case OSTRUCTLIT:
		for _, elt := range n.List.Slice() {
			e.escassignWhyWhere(n, elt.Left, "struct literal element", n)
		}

	case OPTRLIT:
		e.track(n)

		// Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too.
		e.escassignWhyWhere(n, n.Left, "pointer literal [assign]", n)

	case OCALLPART:
		e.track(n)

		// Contents make it to memory, lose track.
		e.escassignSinkWhy(n, n.Left, "call part")

	case OMAPLIT:
		e.track(n)
		// Keys and values make it to memory, lose track.
		for _, elt := range n.List.Slice() {
			e.escassignSinkWhy(n, elt.Left, "map literal key")
			e.escassignSinkWhy(n, elt.Right, "map literal value")
		}

	case OCLOSURE:
		// Link addresses of captured variables to closure.
		for _, v := range n.Func.Closure.Func.Cvars.Slice() {
			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
				continue
			}
			a := v.Name.Defn
			if !v.Name.Byval() {
				a = nod(OADDR, a, nil)
				a.Pos = v.Pos
				e.nodeEscState(a).Loopdepth = e.loopdepth
				a = typecheck(a, ctxExpr)
			}

			e.escassignWhyWhere(n, a, "captured by a closure", n)
		}
		fallthrough

	case OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		ONEW,
		ORUNES2STR,
		OBYTES2STR,
		OSTR2RUNES,
		OSTR2BYTES,
		ORUNESTR:
		e.track(n)

	case OADDSTR:
		e.track(n)
		// Arguments of OADDSTR do not escape.

	case OADDR:
		// current loop depth is an upper bound on actual loop depth
		// of addressed value.
		e.track(n)

		// for &x, use loop depth of x if known.
		// it should always be known, but if not, be conservative
		// and keep the current loop depth.
		if n.Left.Op == ONAME {
			switch n.Left.Class() {
			// PPARAM is loop depth 1 always.
			// PPARAMOUT is loop depth 0 for writes
			// but considered loop depth 1 for address-of,
			// so that writing the address of one result
			// to another (or the same) result makes the
			// first result move to the heap.
			case PPARAM, PPARAMOUT:
				nE := e.nodeEscState(n)
				nE.Loopdepth = 1
				break opSwitch
			}
		}
		nE := e.nodeEscState(n)
		leftE := e.nodeEscState(n.Left)
		if leftE.Loopdepth != 0 {
			nE.Loopdepth = leftE.Loopdepth
		}

	case ODOT,
		ODOTPTR,
		OINDEX:
		// Propagate the loopdepth of t to t.field.
		if n.Left.Op != OLITERAL { // OLITERAL node doesn't have esc state
			e.nodeEscState(n).Loopdepth = e.nodeEscState(n.Left).Loopdepth
		}
	}

	lineno = lno
}

// escassignWhyWhere bundles a common case of
// escassign(e, dst, src, e.stepAssignWhere(dst, src, reason, where))
func (e *EscState) escassignWhyWhere(dst, src *Node, reason string, where *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, where)
	}
	e.escassign(dst, src, step)
}

// escassignSinkWhy bundles a common case of
// escassign(e, &e.theSink, src, e.stepAssign(nil, dst, src, reason))
func (e *EscState) escassignSinkWhy(dst, src *Node, reason string) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssign(nil, dst, src, reason)
	}
	e.escassign(&e.theSink, src, step)
}

// escassignSinkWhyWhere is escassignSinkWhy but includes a call site
// for accurate location reporting.
func (e *EscState) escassignSinkWhyWhere(dst, src *Node, reason string, call *Node) {
	var step *EscStep
	if Debug['m'] != 0 {
		step = e.stepAssignWhere(dst, src, reason, call)
	}
	e.escassign(&e.theSink, src, step)
}

// escassign asserts that expr somehow gets assigned to dst, if non nil. For
// dst==nil, any name node expr still must be marked as being
// evaluated in curfn. For expr==nil, dst must still be examined for
// evaluations inside it (e.g. *f(x) = y).
func (e *EscState) escassign(dst, src *Node, step *EscStep) {
	if dst.isBlank() || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
		return
	}

	if Debug['m'] > 2 {
		fmt.Printf("%v:[%d] %v escassign: %S(%0j)[%v] = %S(%0j)[%v]\n",
			linestr(lineno), e.loopdepth, funcSym(Curfn),
			dst, dst, dst.Op,
			src, src, src.Op)
	}

	setlineno(dst)

	originalDst := dst
	dstwhy := "assigned"

	// Analyze lhs of assignment.
	// Replace dst with &e.theSink if we can't track it.
	switch dst.Op {
	default:
		Dump("dst", dst)
		Fatalf("escassign: unexpected dst")

	case OARRAYLIT,
		OSLICELIT,
		OCLOSURE,
		OCONV,
		OCONVIFACE,
		OCONVNOP,
		OMAPLIT,
		OSTRUCTLIT,
		OPTRLIT,
		ODDDARG,
		OCALLPART:

	case ONAME:
		if dst.Class() == PEXTERN {
			dstwhy = "assigned to top level variable"
			dst = &e.theSink
		}

	case ODOT: // treat "dst.x = src" as "dst = src"
		e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "dot-equals"))
		return

	case OINDEX:
		if dst.Left.Type.IsArray() {
			e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "array-element-equals"))
			return
		}

		dstwhy = "slice-element-equals"
		dst = &e.theSink // lose track of dereference

	case ODEREF:
		dstwhy = "star-equals"
		dst = &e.theSink // lose track of dereference

	case ODOTPTR:
		dstwhy = "star-dot-equals"
		dst = &e.theSink // lose track of dereference

	// lose track of key and value
	case OINDEXMAP:
		e.escassign(&e.theSink, dst.Right, e.stepAssign(nil, originalDst, src, "key of map put"))
		dstwhy = "value of map put"
		dst = &e.theSink
	}

	lno := setlineno(src)
	e.pdepth++

	switch src.Op {
	case OADDR, // dst = &x
		ODEREF,  // dst = *x
		ODOTPTR, // dst = (*x).f
		ONAME,
		ODDDARG,
		OPTRLIT,
		OARRAYLIT,
		OSLICELIT,
		OMAPLIT,
		OSTRUCTLIT,
		OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		ORUNES2STR,
		OBYTES2STR,
		OSTR2RUNES,
		OSTR2BYTES,
		OADDSTR,
		ONEW,
		OCALLPART,
		ORUNESTR,
		OCONVIFACE:
		e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))

	case OCLOSURE:
		// OCLOSURE is lowered to OPTRLIT,
		// insert OADDR to account for the additional indirection.
		a := nod(OADDR, src, nil)
		a.Pos = src.Pos
		e.nodeEscState(a).Loopdepth = e.nodeEscState(src).Loopdepth
		a.Type = types.NewPtr(src.Type)
		e.escflows(dst, a, e.stepAssign(nil, originalDst, src, dstwhy))

	// Flowing multiple returns to a single dst happens when
	// analyzing "go f(g())": here g() flows to sink (issue 4529).
	case OCALLMETH, OCALLFUNC, OCALLINTER:
		for _, n := range e.nodeEscState(src).Retval.Slice() {
			e.escflows(dst, n, e.stepAssign(nil, originalDst, n, dstwhy))
		}

	// A non-pointer escaping from a struct does not concern us.
	case ODOT:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		fallthrough

	// Conversions, field access, slice all preserve the input value.
	case OCONV,
		OCONVNOP,
		ODOTMETH,
		// treat recv.meth as a value with recv in it, only happens in ODEFER and OGO
		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
		OSLICE,
		OSLICE3,
		OSLICEARR,
		OSLICE3ARR,
		OSLICESTR:
		// Conversions, field access, slice all preserve the input value.
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case ODOTTYPE,
		ODOTTYPE2:
		if src.Type != nil && !types.Haspointers(src.Type) {
			break
		}
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

	case OAPPEND:
		// Append returns first argument.
		// Subsequent arguments are already leaked because they are operands to append.
		e.escassign(dst, src.List.First(), e.stepAssign(step, dst, src.List.First(), dstwhy))

	case OINDEX:
		// Index of array preserves input value.
		if src.Left.Type.IsArray() {
			e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
		} else {
			e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))
		}

	// Might be pointer arithmetic, in which case
	// the operands flow into the result.
	// TODO(rsc): Decide what the story is here. This is unsettling.
	case OADD,
		OSUB,
		OOR,
		OXOR,
		OMUL,
		ODIV,
		OMOD,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		OPLUS,
		ONEG,
		OBITNOT:
		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))

		e.escassign(dst, src.Right, e.stepAssign(step, originalDst, src, dstwhy))
	}

	e.pdepth--
	lineno = lno
}

// The common case for escapes is 16 bits 000000000xxxEEEE,
// where the commonest cases for xxx, encoding in-to-out pointer
// flow, are 000, 001, 010, 011 and EEEE is the computed Esc bits.
// Note that the width of xxx depends on the value of the constant
// bitsPerOutputInTag -- expect 2 or 3, so in practice the
// tag cache array is 64 or 128 entries long. Some entries will
// never be populated.
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string

// mktag returns the string representation for an escape analysis tag.
func mktag(mask int) string {
	switch mask & EscMask {
	case EscNone, EscReturn:
	default:
		Fatalf("escape mktag")
	}

	if mask < len(tags) && tags[mask] != "" {
		return tags[mask]
	}

	s := fmt.Sprintf("esc:0x%x", mask)
	if mask < len(tags) {
		tags[mask] = s
	}
	return s
}

// parsetag decodes an escape analysis tag and returns the esc value.
func parsetag(note string) uint16 {
	if !strings.HasPrefix(note, "esc:") {
		return EscUnknown
	}
	n, _ := strconv.ParseInt(note[4:], 0, 0)
	em := uint16(n)
	if em == 0 {
		return EscNone
	}
	return em
}

// describeEscape returns a string describing the escape tag.
// The result is either one of {EscUnknown, EscNone, EscHeap}, which have no further annotation,
// or a description of parameter flow, which takes the form of an optional "contentToHeap"
// indicating that the content of this parameter is leaked to the heap, followed by a sequence
// of level encodings separated by spaces, one for each output, where _ means no flow,
// = means direct flow, and N asterisks (*) encodes content (obtained by indirection) flow.
// e.g., "contentToHeap _ =" means that a parameter's content (one or more dereferences)
// escapes to the heap, the parameter does not leak to the first output, but does leak directly
// to the second output (and if there are more than two outputs, there is no flow to those).
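// For instance, an encoding of 0x12 (EscReturn plus a level-0 flow recorded for
// the first output) is rendered as "EscReturn =".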
func describeEscape(em uint16) string {
	var s string
	switch em & EscMask {
	case EscUnknown:
		s = "EscUnknown"
	case EscNone:
		s = "EscNone"
	case EscHeap:
		s = "EscHeap"
	case EscReturn:
		s = "EscReturn"
	}
	if em&EscContentEscapes != 0 {
		if s != "" {
			s += " "
		}
		s += "contentToHeap"
	}
	for em >>= EscReturnBits; em != 0; em >>= bitsPerOutputInTag {
		// See encoding description above
		if s != "" {
			s += " "
		}
		switch embits := em & bitsMaskForTag; embits {
		case 0:
			s += "_"
		case 1:
			s += "="
		default:
			for i := uint16(0); i < embits-1; i++ {
				s += "*"
			}
		}

	}
	return s
}

// escassignfromtag models the input-to-output assignment flow of one of a function
// call's arguments, where the flow is encoded in "note".
func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) uint16 {
	em := parsetag(note)
	if src.Op == OLITERAL {
		return em
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::assignfromtag:: src=%S, em=%s\n",
			linestr(lineno), src, describeEscape(em))
	}

	if em == EscUnknown {
		e.escassignSinkWhyWhere(src, src, "passed to call[argument escapes]", call)
		return em
	}

	if em == EscNone {
		return em
	}

	// If content inside parameter (reached via indirection)
	// escapes to heap, mark as such.
	if em&EscContentEscapes != 0 {
		e.escassign(&e.theSink, e.addDereference(src), e.stepAssignWhere(src, src, "passed to call[argument content escapes]", call))
	}

	em0 := em
	dstsi := 0
	for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em >>= bitsPerOutputInTag {
		// Prefer the lowest-level path to the reference (for escape purposes).
		// Two-bit encoding (for example; 1, 3, and 4 bits are other options):
		//  01 = 0-level
		//  10 = 1-level, (content escapes),
		//  11 = 2-level, (content of content escapes),
		embits := em & bitsMaskForTag
		if embits > 0 {
			n := src
			for i := uint16(0); i < embits-1; i++ {
				n = e.addDereference(n) // encode level>0 as indirections
			}
			e.escassign(dsts.Index(dstsi), n, e.stepAssignWhere(dsts.Index(dstsi), src, "passed-to-and-returned-from-call", call))
		}
		dstsi++
	}
	// If there are too many outputs to fit in the tag,
	// that is handled at the encoding end as EscHeap,
	// so there is no need to check here.

	if em != 0 && dstsi >= dsts.Len() {
		Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
	}
	return em0
}

func (e *EscState) escassignDereference(dst *Node, src *Node, step *EscStep) {
	if src.Op == OLITERAL {
		return
	}
	e.escassign(dst, e.addDereference(src), step)
}

// addDereference constructs a suitable ODEREF note applied to src.
// Because this is for purposes of escape accounting, not execution,
// some semantically dubious node combinations are (currently) possible.
func (e *EscState) addDereference(n *Node) *Node {
	ind := nod(ODEREF, n, nil)
	e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
	ind.Pos = n.Pos
	t := n.Type
	if t.IsPtr() || t.IsSlice() {
		// This should model our own sloppy use of ODEREF to encode
		// decreasing levels of indirection; i.e., "indirecting" a slice
		// yields the type of an element.
		t = t.Elem()
	} else if t.IsString() {
		t = types.Types[TUINT8]
	}
	ind.Type = t
	return ind
}

// escNoteOutputParamFlow encodes maxEncodedLevel/.../1/0-level flow to the vargen'th parameter.
// Levels greater than maxEncodedLevel are replaced with maxEncodedLevel.
// If the encoding cannot describe the modified input level and output number, then EscHeap is returned.
func escNoteOutputParamFlow(e uint16, vargen int32, level Level) uint16 {
	// Flow+level is encoded in two bits.
	// 00 = not flow, xx = level+1 for 0 <= level <= maxEncodedLevel
	// 16 bits for Esc allows 6x2bits or 4x3bits or 3x4bits if additional information would be useful.
	if level.int() <= 0 && level.guaranteedDereference() > 0 {
		return escMax(e|EscContentEscapes, EscNone) // At least one deref, thus only content.
	}
	if level.int() < 0 {
		return EscHeap
	}
	if level.int() > maxEncodedLevel {
		// Cannot encode larger values than maxEncodedLevel.
		level = levelFrom(maxEncodedLevel)
	}
	encoded := uint16(level.int() + 1)

	shift := uint(bitsPerOutputInTag*(vargen-1) + EscReturnBits)
	old := (e >> shift) & bitsMaskForTag
	if old == 0 || encoded != 0 && encoded < old {
		old = encoded
	}

	encodedFlow := old << shift
	if (encodedFlow>>shift)&bitsMaskForTag != old {
		// Encoding failure defaults to heap.
		return EscHeap
	}

	return (e &^ (bitsMaskForTag << shift)) | encodedFlow
}

func (e *EscState) initEscRetval(call *Node, fntype *types.Type) {
	cE := e.nodeEscState(call)
	cE.Retval.Set(nil) // Suspect this is not nil for indirect calls.
	for i, f := range fntype.Results().Fields().Slice() {
		buf := fmt.Sprintf(".out%d", i)
		ret := newname(lookup(buf))
		ret.SetAddable(false) // TODO(mdempsky): Seems suspicious.
		ret.Type = f.Type
		ret.SetClass(PAUTO)
		ret.Name.Curfn = Curfn
		e.nodeEscState(ret).Loopdepth = e.loopdepth
		ret.Name.SetUsed(true)
		ret.Pos = call.Pos
		cE.Retval.Append(ret)
	}
}

// esccall is a bit messier than fortunate, pulled out of esc's big
// switch for clarity. We either have the paramnodes, which may be
// connected to other things through flows, or we have the parameter type
// nodes, which may be marked "noescape". Navigating the AST is slightly
// different for methods vs plain functions and for imported vs
// this-package functions.
func (e *EscState) esccall(call *Node, parent *Node) {
	var fntype *types.Type
	var indirect bool
	var fn *Node
	switch call.Op {
	default:
		Fatalf("esccall")

	case OCALLFUNC:
		fn = call.Left
		fntype = fn.Type
		indirect = fn.Op != ONAME || fn.Class() != PFUNC

	case OCALLMETH:
		fn = asNode(call.Left.Sym.Def)
		if fn != nil {
			fntype = fn.Type
		} else {
			fntype = call.Left.Type
		}

	case OCALLINTER:
		fntype = call.Left.Type
		indirect = true
	}

	argList := call.List
	if argList.Len() == 1 {
		arg := argList.First()
		if arg.Type.IsFuncArgStruct() { // f(g())
			argList = e.nodeEscState(arg).Retval
		}
	}

	args := argList.Slice()

	if indirect {
		// We know nothing!
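		// (The callee is reached through a function value or an interface,
		// so no escape tags are available for it.)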
		// Leak all the parameters.
		for _, arg := range args {
			e.escassignSinkWhy(call, arg, "parameter to indirect call")
			if Debug['m'] > 3 {
				fmt.Printf("%v::esccall:: indirect call <- %S, untracked\n", linestr(lineno), arg)
			}
		}
		// Set up bogus outputs.
		e.initEscRetval(call, fntype)
		// If there is a receiver, it also leaks to heap.
		if call.Op != OCALLFUNC {
			rf := fntype.Recv()
			r := call.Left.Left
			if types.Haspointers(rf.Type) {
				e.escassignSinkWhy(call, r, "receiver in indirect call")
			}
		} else { // indirect and OCALLFUNC = could be captured variables, too. (#14409)
			rets := e.nodeEscState(call).Retval.Slice()
			for _, ret := range rets {
				e.escassignDereference(ret, fn, e.stepAssignWhere(ret, fn, "captured by called closure", call))
			}
		}
		return
	}

	cE := e.nodeEscState(call)
	if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC &&
		fn.Name.Defn != nil && fn.Name.Defn.Nbody.Len() != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged {
		// function in same mutually recursive group. Incorporate into flow graph.
		if Debug['m'] > 3 {
			fmt.Printf("%v::esccall:: %S in recursive group\n", linestr(lineno), call)
		}

		if fn.Name.Defn.Esc == EscFuncUnknown || cE.Retval.Len() != 0 {
			Fatalf("graph inconsistency")
		}

		i := 0

		// Receiver.
		if call.Op != OCALLFUNC {
			rf := fntype.Recv()
			if rf.Sym != nil && !rf.Sym.IsBlank() {
				n := fn.Name.Defn.Func.Dcl[0]
				i++
				if n.Class() != PPARAM {
					Fatalf("esccall: not a parameter %+v", n)
				}
				e.escassignWhyWhere(n, call.Left.Left, "recursive call receiver", call)
			}
		}

		// Parameters.
		for _, param := range fntype.Params().FieldSlice() {
			if param.Sym == nil || param.Sym.IsBlank() {
				// Unnamed parameter is not listed in Func.Dcl.
				// But we need to consume the arg.
				if param.IsDDD() && !call.IsDDD() {
					args = nil
				} else {
					args = args[1:]
				}
				continue
			}

			n := fn.Name.Defn.Func.Dcl[i]
			i++
			if n.Class() != PPARAM {
				Fatalf("esccall: not a parameter %+v", n)
			}
			if len(args) == 0 {
				continue
			}
			arg := args[0]
			if n.IsDDD() && !call.IsDDD() {
				// Introduce ODDDARG node to represent ... allocation.
				arg = nod(ODDDARG, nil, nil)
				arr := types.NewArray(n.Type.Elem(), int64(len(args)))
				arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
				arg.Pos = call.Pos
				e.track(arg)
				call.Right = arg
			}
			e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help.
			if arg == args[0] {
				args = args[1:]
				continue
			}
			// "..." arguments are untracked.
			for _, a := range args {
				if Debug['m'] > 3 {
					fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
				}
				e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
			}
			// ... arg consumes all remaining arguments.
			args = nil
		}

		// Results.
		for _, n := range fn.Name.Defn.Func.Dcl[i:] {
			if n.Class() == PPARAMOUT {
				cE.Retval.Append(n)
			}
		}

		// Sanity check: all arguments must be consumed.
		if len(args) != 0 {
			Fatalf("esccall not consumed all args %+v\n", call)
		}
		return
	}

	// Imported or completely analyzed function. Use the escape tags.
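	// (esctag attaches these tags once a function group has been analyzed;
	// imported functions carry them in their parameter notes.)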
	if cE.Retval.Len() != 0 {
		Fatalf("esc already decorated call %+v\n", call)
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::esccall:: %S not recursive\n", linestr(lineno), call)
	}

	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
	e.initEscRetval(call, fntype)

	// Receiver.
	if call.Op != OCALLFUNC {
		rf := fntype.Recv()
		r := call.Left.Left
		if types.Haspointers(rf.Type) {
			e.escassignfromtag(rf.Note, cE.Retval, r, call)
		}
	}

	for i, param := range fntype.Params().FieldSlice() {
		note := param.Note
		var arg *Node
		if param.IsDDD() && !call.IsDDD() {
			rest := args[i:]
			if len(rest) == 0 {
				break
			}

			// Introduce ODDDARG node to represent ... allocation.
			arg = nod(ODDDARG, nil, nil)
			arg.Pos = call.Pos
			arr := types.NewArray(param.Type.Elem(), int64(len(rest)))
			arg.Type = types.NewPtr(arr) // make pointer so it will be tracked
			e.track(arg)
			call.Right = arg

			// Store arguments into slice for ... arg.
			for _, a := range rest {
				if Debug['m'] > 3 {
					fmt.Printf("%v::esccall:: ... <- %S\n", linestr(lineno), a)
				}
				if note == uintptrEscapesTag {
					e.escassignSinkWhyWhere(arg, a, "arg to uintptrescapes ...", call)
				} else {
					e.escassignWhyWhere(arg, a, "arg to ...", call)
				}
			}
		} else {
			arg = args[i]
			if note == uintptrEscapesTag {
				e.escassignSinkWhy(arg, arg, "escaping uintptr")
			}
		}

		if types.Haspointers(param.Type) && e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OGO {
			a := arg
			for a.Op == OCONVNOP {
				a = a.Left
			}
			switch a.Op {
			// The callee has already been analyzed, so its arguments have esc tags.
			// The argument is marked as not escaping at all.
			// Record that fact so that any temporary used for
			// synthesizing this expression can be reclaimed when
			// the function returns.
			// This 'noescape' is even stronger than the usual esc == EscNone.
			// arg.Esc == EscNone means that arg does not escape the current function.
			// arg.SetNoescape(true) here means that arg does not escape this statement
			// in the current function.
			case OCALLPART, OCLOSURE, ODDDARG, OARRAYLIT, OSLICELIT, OPTRLIT, OSTRUCTLIT:
				a.SetNoescape(true)
			}
		}
	}
}

// escflows records the link src->dst in dst, throwing out some quick wins,
// and also ensuring that dst is noted as a flow destination.
func (e *EscState) escflows(dst, src *Node, why *EscStep) {
	if dst == nil || src == nil || dst == src {
		return
	}

	// Don't bother building a graph for scalars.
	if src.Type != nil && !types.Haspointers(src.Type) && !isReflectHeaderDataField(src) {
		if Debug['m'] > 3 {
			fmt.Printf("%v::NOT flows:: %S <- %S\n", linestr(lineno), dst, src)
		}
		return
	}

	if Debug['m'] > 3 {
		fmt.Printf("%v::flows:: %S <- %S\n", linestr(lineno), dst, src)
	}

	dstE := e.nodeEscState(dst)
	if len(dstE.Flowsrc) == 0 {
		e.dsts = append(e.dsts, dst)
		e.dstcount++
	}

	e.edgecount++

	if why == nil {
		dstE.Flowsrc = append(dstE.Flowsrc, EscStep{src: src})
	} else {
		starwhy := *why
		starwhy.src = src // TODO: need to reconcile this w/ needs of explanations.
1841 dstE.Flowsrc = append(dstE.Flowsrc, starwhy) 1842 } 1843 } 1844 1845 // Whenever we hit a reference node, the level goes up by one, and whenever 1846 // we hit an OADDR, the level goes down by one. as long as we're on a level > 0 1847 // finding an OADDR just means we're following the upstream of a dereference, 1848 // so this address doesn't leak (yet). 1849 // If level == 0, it means the /value/ of this node can reach the root of this flood. 1850 // so if this node is an OADDR, its argument should be marked as escaping iff 1851 // its currfn/e.loopdepth are different from the flood's root. 1852 // Once an object has been moved to the heap, all of its upstream should be considered 1853 // escaping to the global scope. 1854 func (e *EscState) escflood(dst *Node) { 1855 switch dst.Op { 1856 case ONAME, OCLOSURE: 1857 default: 1858 return 1859 } 1860 1861 dstE := e.nodeEscState(dst) 1862 if Debug['m'] > 2 { 1863 fmt.Printf("\nescflood:%d: dst %S scope:%v[%d]\n", e.walkgen, dst, e.curfnSym(dst), dstE.Loopdepth) 1864 } 1865 1866 for i := range dstE.Flowsrc { 1867 e.walkgen++ 1868 s := &dstE.Flowsrc[i] 1869 s.parent = nil 1870 e.escwalk(levelFrom(0), dst, s.src, s) 1871 } 1872 } 1873 1874 // funcOutputAndInput reports whether dst and src correspond to output and input parameters of the same function. 1875 func funcOutputAndInput(dst, src *Node) bool { 1876 // Note if dst is marked as escaping, then "returned" is too weak. 1877 return dst.Op == ONAME && dst.Class() == PPARAMOUT && 1878 src.Op == ONAME && src.Class() == PPARAM && src.Name.Curfn == dst.Name.Curfn 1879 } 1880 1881 func (es *EscStep) describe(src *Node) { 1882 if Debug['m'] < 2 { 1883 return 1884 } 1885 step0 := es 1886 for step := step0; step != nil && !step.busy; step = step.parent { 1887 // TODO: We get cycles. Trigger is i = &i (where var i interface{}) 1888 step.busy = true 1889 // The trail is a little odd because of how the 1890 // graph is constructed. The link to the current 1891 // Node is parent.src unless parent is nil in which 1892 // case it is step.dst. 1893 nextDest := step.parent 1894 dst := step.dst 1895 where := step.where 1896 if nextDest != nil { 1897 dst = nextDest.src 1898 } 1899 if where == nil { 1900 where = dst 1901 } 1902 Warnl(src.Pos, "\tfrom %v (%s) at %s", dst, step.why, where.Line()) 1903 } 1904 for step := step0; step != nil && step.busy; step = step.parent { 1905 step.busy = false 1906 } 1907 } 1908 1909 const NOTALOOPDEPTH = -1 1910 1911 func (e *EscState) escwalk(level Level, dst *Node, src *Node, step *EscStep) { 1912 e.escwalkBody(level, dst, src, step, NOTALOOPDEPTH) 1913 } 1914 1915 func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) { 1916 if src.Op == OLITERAL { 1917 return 1918 } 1919 srcE := e.nodeEscState(src) 1920 if srcE.Walkgen == e.walkgen { 1921 // Esclevels are vectors, do not compare as integers, 1922 // and must use "min" of old and new to guarantee 1923 // convergence. 1924 level = level.min(srcE.Level) 1925 if level == srcE.Level { 1926 // Have we been here already with an extraloopdepth, 1927 // or is the extraloopdepth provided no improvement on 1928 // what's already been seen? 1929 if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth { 1930 return 1931 } 1932 srcE.Maxextraloopdepth = extraloopdepth 1933 } 1934 } else { // srcE.Walkgen < e.walkgen -- first time, reset this. 
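// First visit of src in this walk generation: clear any extra loop depth
// recorded by an earlier generation before the new level is stored below.
//
// A small worked case for the revisit rule above (hypothetical nodes, not
// compiler output): if src was first reached through a dereference at level 1
// and is now reached directly at level 0, level.min yields 0, which differs
// from the stored level, so the walk continues and records the stronger
// (lower) level; the early return happens only when neither the level nor
// extraloopdepth improves on what was already seen.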
1935 srcE.Maxextraloopdepth = NOTALOOPDEPTH 1936 } 1937 1938 srcE.Walkgen = e.walkgen 1939 srcE.Level = level 1940 modSrcLoopdepth := srcE.Loopdepth 1941 1942 if extraloopdepth > modSrcLoopdepth { 1943 modSrcLoopdepth = extraloopdepth 1944 } 1945 1946 if Debug['m'] > 2 { 1947 fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %S(%0j) scope:%v[%d] extraloopdepth=%v\n", 1948 level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", src.Op, src, src, e.curfnSym(src), srcE.Loopdepth, extraloopdepth) 1949 } 1950 1951 e.pdepth++ 1952 1953 // Input parameter flowing to output parameter? 1954 var leaks bool 1955 var osrcesc uint16 // used to prevent duplicate error messages 1956 1957 dstE := e.nodeEscState(dst) 1958 if funcOutputAndInput(dst, src) && src.Esc&EscMask < EscHeap && dst.Esc != EscHeap { 1959 // This case handles: 1960 // 1. return in 1961 // 2. return &in 1962 // 3. tmp := in; return &tmp 1963 // 4. return *in 1964 if Debug['m'] != 0 { 1965 if Debug['m'] <= 2 { 1966 Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level.int()) 1967 step.describe(src) 1968 } else { 1969 Warnl(src.Pos, "leaking param: %S to result %v level=%v", src, dst.Sym, level) 1970 } 1971 } 1972 if src.Esc&EscMask != EscReturn { 1973 src.Esc = EscReturn | src.Esc&EscContentEscapes 1974 } 1975 src.Esc = escNoteOutputParamFlow(src.Esc, dst.Name.Vargen, level) 1976 goto recurse 1977 } 1978 1979 // If parameter content escapes to heap, set EscContentEscapes 1980 // Note minor confusion around escape from pointer-to-struct vs escape from struct 1981 if dst.Esc == EscHeap && 1982 src.Op == ONAME && src.Class() == PPARAM && src.Esc&EscMask < EscHeap && 1983 level.int() > 0 { 1984 src.Esc = escMax(EscContentEscapes|src.Esc, EscNone) 1985 if Debug['m'] != 0 { 1986 Warnl(src.Pos, "mark escaped content: %S", src) 1987 step.describe(src) 1988 } 1989 } 1990 1991 leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Loopdepth < modSrcLoopdepth 1992 leaks = leaks || level.int() <= 0 && dst.Esc&EscMask == EscHeap 1993 1994 osrcesc = src.Esc 1995 switch src.Op { 1996 case ONAME: 1997 if src.Class() == PPARAM && (leaks || dstE.Loopdepth < 0) && src.Esc&EscMask < EscHeap { 1998 if level.guaranteedDereference() > 0 { 1999 src.Esc = escMax(EscContentEscapes|src.Esc, EscNone) 2000 if Debug['m'] != 0 { 2001 if Debug['m'] <= 2 { 2002 if osrcesc != src.Esc { 2003 Warnl(src.Pos, "leaking param content: %S", src) 2004 step.describe(src) 2005 } 2006 } else { 2007 Warnl(src.Pos, "leaking param content: %S level=%v dst.eld=%v src.eld=%v dst=%S", 2008 src, level, dstE.Loopdepth, modSrcLoopdepth, dst) 2009 } 2010 } 2011 } else { 2012 src.Esc = EscHeap 2013 if Debug['m'] != 0 { 2014 if Debug['m'] <= 2 { 2015 Warnl(src.Pos, "leaking param: %S", src) 2016 step.describe(src) 2017 } else { 2018 Warnl(src.Pos, "leaking param: %S level=%v dst.eld=%v src.eld=%v dst=%S", 2019 src, level, dstE.Loopdepth, modSrcLoopdepth, dst) 2020 } 2021 } 2022 } 2023 } 2024 2025 // Treat a captured closure variable as equivalent to the 2026 // original variable. 
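// For example (illustrative only; diagnostic wording differs between
// releases):
//
//	func counter() func() int {
//		n := 0 // captured by reference by the closure below
//		return func() int { n++; return n }
//	}
//
// The returned closure escapes, and because the captured (inner) n is walked
// as if it were the outer n via Name.Defn below, the outer n must be moved to
// the heap in counter.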
2027 if src.IsClosureVar() { 2028 if leaks && Debug['m'] != 0 { 2029 Warnl(src.Pos, "leaking closure reference %S", src) 2030 step.describe(src) 2031 } 2032 e.escwalk(level, dst, src.Name.Defn, e.stepWalk(dst, src.Name.Defn, "closure-var", step)) 2033 } 2034 2035 case OPTRLIT, OADDR: 2036 why := "pointer literal" 2037 if src.Op == OADDR { 2038 why = "address-of" 2039 } 2040 if leaks { 2041 src.Esc = EscHeap 2042 if Debug['m'] != 0 && osrcesc != src.Esc { 2043 p := src 2044 if p.Left.Op == OCLOSURE { 2045 p = p.Left // merely to satisfy error messages in tests 2046 } 2047 if Debug['m'] > 2 { 2048 Warnl(src.Pos, "%S escapes to heap, level=%v, dst=%v dst.eld=%v, src.eld=%v", 2049 p, level, dst, dstE.Loopdepth, modSrcLoopdepth) 2050 } else { 2051 Warnl(src.Pos, "%S escapes to heap", p) 2052 step.describe(src) 2053 } 2054 } 2055 addrescapes(src.Left) 2056 e.escwalkBody(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step), modSrcLoopdepth) 2057 extraloopdepth = modSrcLoopdepth // passes to recursive case, seems likely a no-op 2058 } else { 2059 e.escwalk(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step)) 2060 } 2061 2062 case OAPPEND: 2063 e.escwalk(level, dst, src.List.First(), e.stepWalk(dst, src.List.First(), "append-first-arg", step)) 2064 2065 case ODDDARG: 2066 if leaks { 2067 src.Esc = EscHeap 2068 if Debug['m'] != 0 && osrcesc != src.Esc { 2069 Warnl(src.Pos, "%S escapes to heap", src) 2070 step.describe(src) 2071 } 2072 extraloopdepth = modSrcLoopdepth 2073 } 2074 // similar to a slice arraylit and its args. 2075 level = level.dec() 2076 2077 case OSLICELIT: 2078 for _, elt := range src.List.Slice() { 2079 if elt.Op == OKEY { 2080 elt = elt.Right 2081 } 2082 e.escwalk(level.dec(), dst, elt, e.stepWalk(dst, elt, "slice-literal-element", step)) 2083 } 2084 2085 fallthrough 2086 2087 case OMAKECHAN, 2088 OMAKEMAP, 2089 OMAKESLICE, 2090 ORUNES2STR, 2091 OBYTES2STR, 2092 OSTR2RUNES, 2093 OSTR2BYTES, 2094 OADDSTR, 2095 OMAPLIT, 2096 ONEW, 2097 OCLOSURE, 2098 OCALLPART, 2099 ORUNESTR, 2100 OCONVIFACE: 2101 if leaks { 2102 src.Esc = EscHeap 2103 if Debug['m'] != 0 && osrcesc != src.Esc { 2104 Warnl(src.Pos, "%S escapes to heap", src) 2105 step.describe(src) 2106 } 2107 extraloopdepth = modSrcLoopdepth 2108 if src.Op == OCONVIFACE { 2109 lt := src.Left.Type 2110 if !lt.IsInterface() && !isdirectiface(lt) && types.Haspointers(lt) { 2111 // We're converting from a non-direct interface type. 2112 // The interface will hold a heap copy of the data 2113 // (by calling convT2I or friend). Flow the data to heap. 2114 // See issue 29353. 
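// For instance (illustrative user code; anySink is an assumed package-level
// variable):
//
//	var anySink interface{}
//
//	type pair struct{ a, b *int }
//
//	func store(p pair) { anySink = p }
//
// pair is not a direct-interface type, so the conversion places a heap copy
// of p in the interface via convT2I or a sibling helper; the pointers inside
// p therefore become reachable from the heap even though p itself is only a
// value parameter, which is why src.Left is flooded to theSink below.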
2115 e.escwalk(level, &e.theSink, src.Left, e.stepWalk(dst, src.Left, "interface-converted", step)) 2116 } 2117 } 2118 } 2119 2120 case ODOT, 2121 ODOTTYPE: 2122 e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "dot", step)) 2123 2124 case 2125 OSLICE, 2126 OSLICEARR, 2127 OSLICE3, 2128 OSLICE3ARR, 2129 OSLICESTR: 2130 e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "slice", step)) 2131 2132 case OINDEX: 2133 if src.Left.Type.IsArray() { 2134 e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "fixed-array-index-of", step)) 2135 break 2136 } 2137 fallthrough 2138 2139 case ODOTPTR: 2140 e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "dot of pointer", step)) 2141 case OINDEXMAP: 2142 e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "map index", step)) 2143 case ODEREF: 2144 e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "indirection", step)) 2145 2146 // In this case a link went directly to a call, but should really go 2147 // to the dummy .outN outputs that were created for the call that 2148 // themselves link to the inputs with levels adjusted. 2149 // See e.g. #10466 2150 // This can only happen with functions returning a single result. 2151 case OCALLMETH, OCALLFUNC, OCALLINTER: 2152 if srcE.Retval.Len() != 0 { 2153 if Debug['m'] > 2 { 2154 fmt.Printf("%v:[%d] dst %S escwalk replace src: %S with %S\n", 2155 linestr(lineno), e.loopdepth, 2156 dst, src, srcE.Retval.First()) 2157 } 2158 src = srcE.Retval.First() 2159 srcE = e.nodeEscState(src) 2160 } 2161 } 2162 2163 recurse: 2164 level = level.copy() 2165 2166 for i := range srcE.Flowsrc { 2167 s := &srcE.Flowsrc[i] 2168 s.parent = step 2169 e.escwalkBody(level, dst, s.src, s, extraloopdepth) 2170 s.parent = nil 2171 } 2172 2173 e.pdepth-- 2174 } 2175 2176 // addrescapes tags node n as having had its address taken 2177 // by "increasing" the "value" of n.Esc to EscHeap. 2178 // Storage is allocated as necessary to allow the address 2179 // to be taken. 2180 func addrescapes(n *Node) { 2181 switch n.Op { 2182 default: 2183 // Unexpected Op, probably due to a previous type error. Ignore. 2184 2185 case ODEREF, ODOTPTR: 2186 // Nothing to do. 2187 2188 case ONAME: 2189 if n == nodfp { 2190 break 2191 } 2192 2193 // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping. 2194 // on PPARAM it means something different. 2195 if n.Class() == PAUTO && n.Esc == EscNever { 2196 break 2197 } 2198 2199 // If a closure reference escapes, mark the outer variable as escaping. 2200 if n.IsClosureVar() { 2201 addrescapes(n.Name.Defn) 2202 break 2203 } 2204 2205 if n.Class() != PPARAM && n.Class() != PPARAMOUT && n.Class() != PAUTO { 2206 break 2207 } 2208 2209 // This is a plain parameter or local variable that needs to move to the heap, 2210 // but possibly for the function outside the one we're compiling. 2211 // That is, if we have: 2212 // 2213 // func f(x int) { 2214 // func() { 2215 // global = &x 2216 // } 2217 // } 2218 // 2219 // then we're analyzing the inner closure but we need to move x to the 2220 // heap in f, not in the inner closure. Flip over to f before calling moveToHeap. 2221 oldfn := Curfn 2222 Curfn = n.Name.Curfn 2223 if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE { 2224 Curfn = Curfn.Func.Closure 2225 } 2226 ln := lineno 2227 lineno = Curfn.Pos 2228 moveToHeap(n) 2229 Curfn = oldfn 2230 lineno = ln 2231 2232 // ODOTPTR has already been introduced, 2233 // so these are the non-pointer ODOT and OINDEX. 
2234 // In &x[0], if x is a slice, then x does not 2235 // escape--the pointer inside x does, but that 2236 // is always a heap pointer anyway. 2237 case ODOT, OINDEX, OPAREN, OCONVNOP: 2238 if !n.Left.Type.IsSlice() { 2239 addrescapes(n.Left) 2240 } 2241 } 2242 } 2243 2244 // moveToHeap records the parameter or local variable n as moved to the heap. 2245 func moveToHeap(n *Node) { 2246 if Debug['r'] != 0 { 2247 Dump("MOVE", n) 2248 } 2249 if compiling_runtime { 2250 yyerror("%v escapes to heap, not allowed in runtime.", n) 2251 } 2252 if n.Class() == PAUTOHEAP { 2253 Dump("n", n) 2254 Fatalf("double move to heap") 2255 } 2256 2257 // Allocate a local stack variable to hold the pointer to the heap copy. 2258 // temp will add it to the function declaration list automatically. 2259 heapaddr := temp(types.NewPtr(n.Type)) 2260 heapaddr.Sym = lookup("&" + n.Sym.Name) 2261 heapaddr.Orig.Sym = heapaddr.Sym 2262 heapaddr.Pos = n.Pos 2263 2264 // Unset AutoTemp to persist the &foo variable name through SSA to 2265 // liveness analysis. 2266 // TODO(mdempsky/drchase): Cleaner solution? 2267 heapaddr.Name.SetAutoTemp(false) 2268 2269 // Parameters have a local stack copy used at function start/end 2270 // in addition to the copy in the heap that may live longer than 2271 // the function. 2272 if n.Class() == PPARAM || n.Class() == PPARAMOUT { 2273 if n.Xoffset == BADWIDTH { 2274 Fatalf("addrescapes before param assignment") 2275 } 2276 2277 // We rewrite n below to be a heap variable (indirection of heapaddr). 2278 // Preserve a copy so we can still write code referring to the original, 2279 // and substitute that copy into the function declaration list 2280 // so that analyses of the local (on-stack) variables use it. 2281 stackcopy := newname(n.Sym) 2282 stackcopy.SetAddable(false) 2283 stackcopy.Type = n.Type 2284 stackcopy.Xoffset = n.Xoffset 2285 stackcopy.SetClass(n.Class()) 2286 stackcopy.Name.Param.Heapaddr = heapaddr 2287 if n.Class() == PPARAMOUT { 2288 // Make sure the pointer to the heap copy is kept live throughout the function. 2289 // The function could panic at any point, and then a defer could recover. 2290 // Thus, we need the pointer to the heap copy always available so the 2291 // post-deferreturn code can copy the return value back to the stack. 2292 // See issue 16095. 2293 heapaddr.SetIsOutputParamHeapAddr(true) 2294 } 2295 n.Name.Param.Stackcopy = stackcopy 2296 2297 // Substitute the stackcopy into the function variable list so that 2298 // liveness and other analyses use the underlying stack slot 2299 // and not the now-pseudo-variable n. 2300 found := false 2301 for i, d := range Curfn.Func.Dcl { 2302 if d == n { 2303 Curfn.Func.Dcl[i] = stackcopy 2304 found = true 2305 break 2306 } 2307 // Parameters are before locals, so can stop early. 2308 // This limits the search even in functions with many local variables. 2309 if d.Class() == PAUTO { 2310 break 2311 } 2312 } 2313 if !found { 2314 Fatalf("cannot find %v in local variable list", n) 2315 } 2316 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n) 2317 } 2318 2319 // Modify n in place so that uses of n now mean indirection of the heapaddr. 2320 n.SetClass(PAUTOHEAP) 2321 n.Xoffset = 0 2322 n.Name.Param.Heapaddr = heapaddr 2323 n.Esc = EscHeap 2324 if Debug['m'] != 0 { 2325 fmt.Printf("%v: moved to heap: %v\n", n.Line(), n) 2326 } 2327 } 2328 2329 // This special tag is applied to uintptr variables 2330 // that we believe may hold unsafe.Pointers for 2331 // calls into assembly functions. 
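// The motivating case is a raw system-call wrapper (sketch only; the call and
// names are assumed, not taken from the compiler):
//
//	syscall.Syscall(trap, uintptr(unsafe.Pointer(p)), 0, 0)
//
// The tag records that such a uintptr argument must be assumed to hold a
// pointer that has to stay live across the call, even though its type is no
// longer a pointer; see golang.org/issue/13372, referenced in esctag below.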
2332 const unsafeUintptrTag = "unsafe-uintptr" 2333 2334 // This special tag is applied to uintptr parameters of functions 2335 // marked go:uintptrescapes. 2336 const uintptrEscapesTag = "uintptr-escapes" 2337 2338 func (e *EscState) esctag(fn *Node) { 2339 fn.Esc = EscFuncTagged 2340 2341 name := func(s *types.Sym, narg int) string { 2342 if s != nil { 2343 return s.Name 2344 } 2345 return fmt.Sprintf("arg#%d", narg) 2346 } 2347 2348 // External functions are assumed unsafe, 2349 // unless //go:noescape is given before the declaration. 2350 if fn.Nbody.Len() == 0 { 2351 if fn.Noescape() { 2352 for _, f := range fn.Type.Params().Fields().Slice() { 2353 if types.Haspointers(f.Type) { 2354 f.Note = mktag(EscNone) 2355 } 2356 } 2357 } 2358 2359 // Assume that uintptr arguments must be held live across the call. 2360 // This is most important for syscall.Syscall. 2361 // See golang.org/issue/13372. 2362 // This really doesn't have much to do with escape analysis per se, 2363 // but we are reusing the ability to annotate an individual function 2364 // argument and pass those annotations along to importing code. 2365 narg := 0 2366 for _, f := range fn.Type.Params().Fields().Slice() { 2367 narg++ 2368 if f.Type.Etype == TUINTPTR { 2369 if Debug['m'] != 0 { 2370 Warnl(fn.Pos, "%v assuming %v is unsafe uintptr", funcSym(fn), name(f.Sym, narg)) 2371 } 2372 f.Note = unsafeUintptrTag 2373 } 2374 } 2375 2376 return 2377 } 2378 2379 if fn.Func.Pragma&UintptrEscapes != 0 { 2380 narg := 0 2381 for _, f := range fn.Type.Params().Fields().Slice() { 2382 narg++ 2383 if f.Type.Etype == TUINTPTR { 2384 if Debug['m'] != 0 { 2385 Warnl(fn.Pos, "%v marking %v as escaping uintptr", funcSym(fn), name(f.Sym, narg)) 2386 } 2387 f.Note = uintptrEscapesTag 2388 } 2389 2390 if f.IsDDD() && f.Type.Elem().Etype == TUINTPTR { 2391 // final argument is ...uintptr. 2392 if Debug['m'] != 0 { 2393 Warnl(fn.Pos, "%v marking %v as escaping ...uintptr", funcSym(fn), name(f.Sym, narg)) 2394 } 2395 f.Note = uintptrEscapesTag 2396 } 2397 } 2398 } 2399 2400 for _, fs := range types.RecvsParams { 2401 for _, f := range fs(fn.Type).Fields().Slice() { 2402 if !types.Haspointers(f.Type) { // don't bother tagging for scalars 2403 continue 2404 } 2405 if f.Note == uintptrEscapesTag { 2406 // Note is already set in the loop above. 2407 continue 2408 } 2409 2410 // Unnamed parameters are unused and therefore do not escape. 2411 if f.Sym == nil || f.Sym.IsBlank() { 2412 f.Note = mktag(EscNone) 2413 continue 2414 } 2415 2416 switch esc := asNode(f.Nname).Esc; esc & EscMask { 2417 case EscNone, // not touched by escflood 2418 EscReturn: 2419 f.Note = mktag(int(esc)) 2420 2421 case EscHeap: // touched by escflood, moved to heap 2422 } 2423 } 2424 } 2425 }
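// A user-visible illustration of the tagging above (sketch only; the
// functions and the assembly implementation are assumed, not taken from the
// compiler):
//
//	//go:noescape
//	func sum(p *[8]int) int // no Go body; implemented in assembly
//
// Because the body is empty and //go:noescape is present, esctag marks p with
// mktag(EscNone), so callers may keep the array on the stack. Without the
// pragma, a bodyless function's pointer parameters are left untagged and
// assumed unsafe, i.e. escaping. Similarly, for a function that does have a
// Go body,
//
//	//go:uintptrescapes
//	func touch(p uintptr) { use(p) } // use is an assumed helper
//
// esctag marks p with uintptrEscapesTag, and esccall then forces the object
// behind any pointer converted to that uintptr to the heap at each call site.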