golang.org/x/tools@v0.21.1-0.20240520172518-788d39e776b1/go/callgraph/vta/graph.go

// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package vta

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/callgraph"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/types/typeutil"
	"golang.org/x/tools/internal/aliases"
	"golang.org/x/tools/internal/typeparams"
)

// node interface for VTA nodes.
type node interface {
	Type() types.Type
	String() string
}

// constant node for VTA.
type constant struct {
	typ types.Type
}

func (c constant) Type() types.Type {
	return c.typ
}

func (c constant) String() string {
	return fmt.Sprintf("Constant(%v)", c.Type())
}

// pointer node for VTA.
type pointer struct {
	typ *types.Pointer
}

func (p pointer) Type() types.Type {
	return p.typ
}

func (p pointer) String() string {
	return fmt.Sprintf("Pointer(%v)", p.Type())
}

// mapKey node for VTA, modeling reachable map key types.
type mapKey struct {
	typ types.Type
}

func (mk mapKey) Type() types.Type {
	return mk.typ
}

func (mk mapKey) String() string {
	return fmt.Sprintf("MapKey(%v)", mk.Type())
}

// mapValue node for VTA, modeling reachable map value types.
type mapValue struct {
	typ types.Type
}

func (mv mapValue) Type() types.Type {
	return mv.typ
}

func (mv mapValue) String() string {
	return fmt.Sprintf("MapValue(%v)", mv.Type())
}

// sliceElem node for VTA, modeling reachable slice and array element types.
type sliceElem struct {
	typ types.Type
}

func (s sliceElem) Type() types.Type {
	return s.typ
}

func (s sliceElem) String() string {
	return fmt.Sprintf("Slice([]%v)", s.Type())
}

// channelElem node for VTA, modeling reachable channel element types.
type channelElem struct {
	typ types.Type
}

func (c channelElem) Type() types.Type {
	return c.typ
}

func (c channelElem) String() string {
	return fmt.Sprintf("Channel(chan %v)", c.Type())
}

// field node for VTA.
type field struct {
	StructType types.Type
	index      int // index of the field in the struct
}

func (f field) Type() types.Type {
	s := typeparams.CoreType(f.StructType).(*types.Struct)
	return s.Field(f.index).Type()
}

func (f field) String() string {
	s := typeparams.CoreType(f.StructType).(*types.Struct)
	return fmt.Sprintf("Field(%v:%s)", f.StructType, s.Field(f.index).Name())
}

// global node for VTA.
type global struct {
	val *ssa.Global
}

func (g global) Type() types.Type {
	return g.val.Type()
}

func (g global) String() string {
	return fmt.Sprintf("Global(%s)", g.val.Name())
}

// local node for VTA modeling local variables
// and function/method parameters.
type local struct {
	val ssa.Value
}

func (l local) Type() types.Type {
	return l.val.Type()
}

func (l local) String() string {
	return fmt.Sprintf("Local(%s)", l.val.Name())
}

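// Illustrative sketch, not part of the upstream file: the String methods above
// give each node kind a compact, readable form, which makes graph dumps easy
// to read when debugging, e.g.
//
//	mapKey{typ: types.Typ[types.String]}.String()   // "MapKey(string)"
//	channelElem{typ: types.Typ[types.Int]}.String() // "Channel(chan int)"
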
// indexedLocal node for VTA. Models indexed locals
// related to the ssa extract instructions.
type indexedLocal struct {
	val   ssa.Value
	index int
	typ   types.Type
}

func (i indexedLocal) Type() types.Type {
	return i.typ
}

func (i indexedLocal) String() string {
	return fmt.Sprintf("Local(%s[%d])", i.val.Name(), i.index)
}

// function node for VTA.
type function struct {
	f *ssa.Function
}

func (f function) Type() types.Type {
	return f.f.Type()
}

func (f function) String() string {
	return fmt.Sprintf("Function(%s)", f.f.Name())
}

// nestedPtrInterface node represents all references and dereferences
// of locals and globals that have a nested pointer to interface type.
// We merge such constructs into a single node for simplicity and without
// much precision sacrifice as such variables are rare in practice. Both
// a and b would be represented as the same PtrInterface(I) node in:
//
//	type I interface
//	var a ***I
//	var b **I
type nestedPtrInterface struct {
	typ types.Type
}

func (l nestedPtrInterface) Type() types.Type {
	return l.typ
}

func (l nestedPtrInterface) String() string {
	return fmt.Sprintf("PtrInterface(%v)", l.typ)
}

// nestedPtrFunction node represents all references and dereferences of locals
// and globals that have a nested pointer to function type. We merge such
// constructs into a single node for simplicity and without much precision
// sacrifice as such variables are rare in practice. Both a and b would be
// represented as the same PtrFunction(func()) node in:
//
//	var a *func()
//	var b **func()
type nestedPtrFunction struct {
	typ types.Type
}

func (p nestedPtrFunction) Type() types.Type {
	return p.typ
}

func (p nestedPtrFunction) String() string {
	return fmt.Sprintf("PtrFunction(%v)", p.typ)
}

// panicArg models types of all arguments passed to panic.
type panicArg struct{}

func (p panicArg) Type() types.Type {
	return nil
}

func (p panicArg) String() string {
	return "Panic"
}

// recoverReturn models types of all return values of recover().
type recoverReturn struct{}

func (r recoverReturn) Type() types.Type {
	return nil
}

func (r recoverReturn) String() string {
	return "Recover"
}

// vtaGraph remembers for each VTA node the set of its successors.
// Tailored for VTA, hence does not support singleton (sub)graphs.
type vtaGraph map[node]map[node]bool

// addEdge adds an edge x->y to the graph.
func (g vtaGraph) addEdge(x, y node) {
	succs, ok := g[x]
	if !ok {
		succs = make(map[node]bool)
		g[x] = succs
	}
	succs[y] = true
}

// successors returns all of n's immediate successors in the graph.
// The order of successor nodes is arbitrary.
func (g vtaGraph) successors(n node) []node {
	var succs []node
	for succ := range g[n] {
		succs = append(succs, succ)
	}
	return succs
}

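// Illustrative sketch, not part of the upstream file: the graph is a plain
// adjacency-set map, so inserting a duplicate edge is a no-op and successors
// simply flattens the set, e.g.
//
//	g := make(vtaGraph)
//	g.addEdge(panicArg{}, recoverReturn{})
//	g.addEdge(panicArg{}, recoverReturn{}) // already present; the set is unchanged
//	succs := g.successors(panicArg{})      // []node{recoverReturn{}}, order arbitrary
//	_ = succs
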
// typePropGraph builds a VTA graph for a set of `funcs` and initial
// `callgraph` needed to establish interprocedural edges. Returns the
// graph and a map for unique type representatives.
func typePropGraph(funcs map[*ssa.Function]bool, callgraph *callgraph.Graph) (vtaGraph, *typeutil.Map) {
	b := builder{graph: make(vtaGraph), callGraph: callgraph}
	b.visit(funcs)
	return b.graph, &b.canon
}

// Data structure responsible for linearly traversing the
// code and building a VTA graph.
type builder struct {
	graph     vtaGraph
	callGraph *callgraph.Graph // initial call graph for creating flows at unresolved call sites.

	// Specialized type map for canonicalization of types.Type.
	// Semantically equivalent types can have different implementations,
	// i.e., they are different pointer values. The map allows us to
	// have one unique representative. The keys are fixed and from the
	// client perspective they are types. The values in our case are
	// types too, in particular type representatives. Each value is a
	// pointer so this map is not expected to take much memory.
	canon typeutil.Map
}

func (b *builder) visit(funcs map[*ssa.Function]bool) {
	// Add the fixed edge Panic -> Recover
	b.graph.addEdge(panicArg{}, recoverReturn{})

	for f, in := range funcs {
		if in {
			b.fun(f)
		}
	}
}

func (b *builder) fun(f *ssa.Function) {
	for _, bl := range f.Blocks {
		for _, instr := range bl.Instrs {
			b.instr(instr)
		}
	}
}

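// Illustrative sketch, not part of the upstream file (f and s are hypothetical):
// for
//
//	func f(s string) interface{} {
//		return s // roughly: t0 = make interface{} <- string (s)
//	}
//
// the MakeInterface case below adds the edge Local(s) -> Local(t0), recording
// that the concrete type string can reach the interface register t0.
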
func (b *builder) instr(instr ssa.Instruction) {
	switch i := instr.(type) {
	case *ssa.Store:
		b.addInFlowAliasEdges(b.nodeFromVal(i.Addr), b.nodeFromVal(i.Val))
	case *ssa.MakeInterface:
		b.addInFlowEdge(b.nodeFromVal(i.X), b.nodeFromVal(i))
	case *ssa.MakeClosure:
		b.closure(i)
	case *ssa.UnOp:
		b.unop(i)
	case *ssa.Phi:
		b.phi(i)
	case *ssa.ChangeInterface:
		// Although in a change interface command a := A(b), a and b are
		// the same object, the only interesting flow happens when A
		// is an interface. We create flow b -> a, but omit a -> b.
		// The latter flow is not needed: if a gets assigned a concrete
		// type later on, that cannot be propagated back to b as b
		// is a separate variable. The a -> b flow can happen when
		// A is a pointer to interface, but then the command is of
		// type ChangeType, handled below.
		b.addInFlowEdge(b.nodeFromVal(i.X), b.nodeFromVal(i))
	case *ssa.ChangeType:
		// A change type command a := A(b) results in a and b being the
		// same value. For a concrete type A, there is no interesting flow.
		//
		// When A is an interface, most interface casts are handled
		// by the ChangeInterface instruction. The relevant case here is
		// when converting a pointer to an interface type. This can happen
		// when the underlying interfaces have the same method set.
		//
		//	type I interface{ foo() }
		//	type J interface{ foo() }
		//	var b *I
		//	a := (*J)(b)
		//
		// When this happens we add flows between a <--> b.
		b.addInFlowAliasEdges(b.nodeFromVal(i), b.nodeFromVal(i.X))
	case *ssa.TypeAssert:
		b.tassert(i)
	case *ssa.Extract:
		b.extract(i)
	case *ssa.Field:
		b.field(i)
	case *ssa.FieldAddr:
		b.fieldAddr(i)
	case *ssa.Send:
		b.send(i)
	case *ssa.Select:
		b.selekt(i)
	case *ssa.Index:
		b.index(i)
	case *ssa.IndexAddr:
		b.indexAddr(i)
	case *ssa.Lookup:
		b.lookup(i)
	case *ssa.MapUpdate:
		b.mapUpdate(i)
	case *ssa.Next:
		b.next(i)
	case ssa.CallInstruction:
		b.call(i)
	case *ssa.Panic:
		b.panic(i)
	case *ssa.Return:
		b.rtrn(i)
	case *ssa.MakeChan, *ssa.MakeMap, *ssa.MakeSlice, *ssa.BinOp,
		*ssa.Alloc, *ssa.DebugRef, *ssa.Convert, *ssa.Jump, *ssa.If,
		*ssa.Slice, *ssa.SliceToArrayPointer, *ssa.Range, *ssa.RunDefers:
		// No interesting flow here.
		// Notes on individual instructions:
		// SliceToArrayPointer: t1 = slice to array pointer *[4]T <- []T (t0)
		// No interesting flow as sliceArrayElem(t1) == sliceArrayElem(t0).
		return
	case *ssa.MultiConvert:
		b.multiconvert(i)
	default:
		panic(fmt.Sprintf("unsupported instruction %v\n", instr))
	}
}

func (b *builder) unop(u *ssa.UnOp) {
	switch u.Op {
	case token.MUL:
		// Multiplication operator * is used here as a dereference operator.
		b.addInFlowAliasEdges(b.nodeFromVal(u), b.nodeFromVal(u.X))
	case token.ARROW:
		t := typeparams.CoreType(u.X.Type()).(*types.Chan).Elem()
		b.addInFlowAliasEdges(b.nodeFromVal(u), channelElem{typ: t})
	default:
		// There is no interesting type flow otherwise.
	}
}

func (b *builder) phi(p *ssa.Phi) {
	for _, edge := range p.Edges {
		b.addInFlowAliasEdges(b.nodeFromVal(p), b.nodeFromVal(edge))
	}
}

func (b *builder) tassert(a *ssa.TypeAssert) {
	if !a.CommaOk {
		b.addInFlowEdge(b.nodeFromVal(a.X), b.nodeFromVal(a))
		return
	}
	// The case where a is a <a.AssertedType, bool> register, so there
	// is a flow from a.X to a[0]. Here, a[0] is represented as an
	// indexedLocal: an entry into local tuple register a at index 0.
	tup := a.Type().(*types.Tuple)
	t := tup.At(0).Type()

	local := indexedLocal{val: a, typ: t, index: 0}
	b.addInFlowEdge(b.nodeFromVal(a.X), local)
}

// extract instruction t1 := t2[i] generates flows between t2[i]
// and t1 where the source is an indexed local representing a value
// from tuple register t2 at index i and the target is t1.
func (b *builder) extract(e *ssa.Extract) {
	tup := e.Tuple.Type().(*types.Tuple)
	t := tup.At(e.Index).Type()

	local := indexedLocal{val: e.Tuple, typ: t, index: e.Index}
	b.addInFlowAliasEdges(b.nodeFromVal(e), local)
}

func (b *builder) field(f *ssa.Field) {
	fnode := field{StructType: f.X.Type(), index: f.Field}
	b.addInFlowEdge(fnode, b.nodeFromVal(f))
}

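// Illustrative sketch, not part of the upstream file (S, s, and w are
// hypothetical): for, roughly,
//
//	type S struct{ W io.Writer }
//	var s S
//	w := s.W
//
// the Field case above adds Field(S:W) -> Local(w); fieldAddr below adds the
// edge in both directions because taking &s.W also lets new values flow into
// the field.
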
func (b *builder) fieldAddr(f *ssa.FieldAddr) {
	t := typeparams.CoreType(f.X.Type()).(*types.Pointer).Elem()

	// Since we are getting a pointer to a field, make a bidirectional edge.
	fnode := field{StructType: t, index: f.Field}
	b.addInFlowEdge(fnode, b.nodeFromVal(f))
	b.addInFlowEdge(b.nodeFromVal(f), fnode)
}

func (b *builder) send(s *ssa.Send) {
	t := typeparams.CoreType(s.Chan.Type()).(*types.Chan).Elem()
	b.addInFlowAliasEdges(channelElem{typ: t}, b.nodeFromVal(s.X))
}

// selekt generates flows for a select statement
//
//	a = select blocking/nonblocking [c_1 <- t_1, c_2 <- t_2, ..., <- o_1, <- o_2, ...]
//
// between receiving channel registers c_i and corresponding input register t_i. Further,
// flows are generated between o_i and a[2 + i]. Note that a is a tuple register of type
// <int, bool, r_1, r_2, ...> where the type of r_i is the element type of channel o_i.
func (b *builder) selekt(s *ssa.Select) {
	recvIndex := 0
	for _, state := range s.States {
		t := typeparams.CoreType(state.Chan.Type()).(*types.Chan).Elem()

		if state.Dir == types.SendOnly {
			b.addInFlowAliasEdges(channelElem{typ: t}, b.nodeFromVal(state.Send))
		} else {
			// state.Dir == RecvOnly by definition of select instructions.
			tupEntry := indexedLocal{val: s, typ: t, index: 2 + recvIndex}
			b.addInFlowAliasEdges(tupEntry, channelElem{typ: t})
			recvIndex++
		}
	}
}

// index instruction a := b[c] on slices creates flows between a and
// SliceElem(t) where t is the element type of b. Arrays and
// slice elements are both modeled as SliceElem.
func (b *builder) index(i *ssa.Index) {
	et := sliceArrayElem(i.X.Type())
	b.addInFlowAliasEdges(b.nodeFromVal(i), sliceElem{typ: et})
}

// indexAddr instruction a := &b[c] fetches the address of an index
// into b, so we create a bidirectional flow a <-> SliceElem(t)
// where t is the element type of b. Arrays and slice elements are
// both modeled as SliceElem.
func (b *builder) indexAddr(i *ssa.IndexAddr) {
	et := sliceArrayElem(i.X.Type())
	b.addInFlowEdge(sliceElem{typ: et}, b.nodeFromVal(i))
	b.addInFlowEdge(b.nodeFromVal(i), sliceElem{typ: et})
}

// lookup handles map query commands a := m[b] where m is of type
// map[...]V and V is an interface. It creates flows between `a`
// and MapValue(V).
func (b *builder) lookup(l *ssa.Lookup) {
	t, ok := l.X.Type().Underlying().(*types.Map)
	if !ok {
		// No interesting flows for string lookups.
		return
	}

	if !l.CommaOk {
		b.addInFlowAliasEdges(b.nodeFromVal(l), mapValue{typ: t.Elem()})
	} else {
		i := indexedLocal{val: l, typ: t.Elem(), index: 0}
		b.addInFlowAliasEdges(i, mapValue{typ: t.Elem()})
	}
}

// mapUpdate handles map update commands m[b] = a where m is of type
// map[K]V and K and V are interfaces. It creates flows between `a`
// and MapValue(V) as well as between MapKey(K) and `b`.
func (b *builder) mapUpdate(u *ssa.MapUpdate) {
	t, ok := u.Map.Type().Underlying().(*types.Map)
	if !ok {
		// No interesting flows for string updates.
		return
	}

	b.addInFlowAliasEdges(mapKey{typ: t.Key()}, b.nodeFromVal(u.Key))
	b.addInFlowAliasEdges(mapValue{typ: t.Elem()}, b.nodeFromVal(u.Value))
}

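// Illustrative sketch, not part of the upstream file (m, k, err, and e are
// hypothetical): for
//
//	m := make(map[string]error)
//	m[k] = err // MapUpdate
//	e := m[k]  // Lookup
//
// mapUpdate above adds Local(err) -> MapValue(error); the edge towards
// MapKey(string) is dropped by addInFlowEdge since string nodes have no
// in-flow. lookup adds MapValue(error) -> Local(e), which is how writers of a
// map get connected to its readers.
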
// next instruction <ok, key, value> := next r, where r
// is a range over map or string, generates flows between
// key and MapKey as well as between value and MapValue nodes.
func (b *builder) next(n *ssa.Next) {
	if n.IsString {
		return
	}
	tup := n.Type().(*types.Tuple)
	kt := tup.At(1).Type()
	vt := tup.At(2).Type()

	b.addInFlowAliasEdges(indexedLocal{val: n, typ: kt, index: 1}, mapKey{typ: kt})
	b.addInFlowAliasEdges(indexedLocal{val: n, typ: vt, index: 2}, mapValue{typ: vt})
}

// addInFlowAliasEdges adds an edge r -> l to b.graph if l is a node that can
// have an inflow, i.e., a node that represents an interface or an unresolved
// function value. Similarly for the edge l -> r, with the additional condition
// that l and r can potentially alias.
func (b *builder) addInFlowAliasEdges(l, r node) {
	b.addInFlowEdge(r, l)

	if canAlias(l, r) {
		b.addInFlowEdge(l, r)
	}
}

func (b *builder) closure(c *ssa.MakeClosure) {
	f := c.Fn.(*ssa.Function)
	b.addInFlowEdge(function{f: f}, b.nodeFromVal(c))

	for i, fv := range f.FreeVars {
		b.addInFlowAliasEdges(b.nodeFromVal(fv), b.nodeFromVal(c.Bindings[i]))
	}
}

// panic creates a flow from the arguments of panic instructions to the return
// registers of all recover statements in the program. Introduces a
// global panic node Panic and
//  1. for every panic statement p: add p -> Panic
//  2. for every recover statement r: add Panic -> r (handled in call)
//
// TODO(zpavlinovic): improve precision by explicitly modeling how panic
// values flow from callees to callers and into deferred recover instructions.
func (b *builder) panic(p *ssa.Panic) {
	// Panics often have, for instance, strings as arguments which do
	// not create interesting flows.
	if !canHaveMethods(p.X.Type()) {
		return
	}

	b.addInFlowEdge(b.nodeFromVal(p.X), panicArg{})
}

// call adds flows between arguments/parameters and return values/registers
// for both static and dynamic calls, as well as go and defer calls.
func (b *builder) call(c ssa.CallInstruction) {
	// When c is a r := recover() call instruction, we add Recover -> r.
	if bf, ok := c.Common().Value.(*ssa.Builtin); ok && bf.Name() == "recover" {
		if v, ok := c.(ssa.Value); ok {
			b.addInFlowEdge(recoverReturn{}, b.nodeFromVal(v))
		}
		return
	}

	for _, f := range siteCallees(c, b.callGraph) {
		addArgumentFlows(b, c, f)
	}
}

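// Illustrative sketch, not part of the upstream file (f, p, a, and r are
// hypothetical): for a resolved call
//
//	r := f(a) // func f(p io.Reader) io.Reader
//
// addArgumentFlows below adds Local(a) -> Local(p), and rtrn/addReturnFlows
// later add an edge from each of f's return registers to Local(r), tying
// callers and callees together through the initial call graph.
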
func addArgumentFlows(b *builder, c ssa.CallInstruction, f *ssa.Function) {
	// When f has no parameters (including receiver), there is no type
	// flow here. Also, f's body and parameters might be missing, such
	// as when vta is used within the golang.org/x/tools/go/analysis
	// framework (see github.com/golang/go/issues/50670).
	if len(f.Params) == 0 {
		return
	}
	cc := c.Common()
	if cc.Method != nil {
		// In principle we don't add interprocedural flows for receiver
		// objects. At a call site, the receiver object is an interface
		// while the callee object is concrete. The flow from interface
		// to concrete type in general does not make sense. The exception
		// is when the concrete type is a named function type (see #57756).
		//
		// The flow the other way around would bake in information from the
		// initial call graph.
		if isFunction(f.Params[0].Type()) {
			b.addInFlowEdge(b.nodeFromVal(cc.Value), b.nodeFromVal(f.Params[0]))
		}
	}

	offset := 0
	if cc.Method != nil {
		offset = 1
	}
	for i, v := range cc.Args {
		// Parameters of f might not be available, as in the case
		// when vta is used within the golang.org/x/tools/go/analysis
		// framework (see github.com/golang/go/issues/50670).
		//
		// TODO: investigate other cases of missing body and parameters
		if len(f.Params) <= i+offset {
			return
		}
		b.addInFlowAliasEdges(b.nodeFromVal(f.Params[i+offset]), b.nodeFromVal(v))
	}
}

// rtrn produces flows between values of r and c where
// c is a call instruction that resolves to the enclosing
// function of r based on b.callGraph.
func (b *builder) rtrn(r *ssa.Return) {
	n := b.callGraph.Nodes[r.Parent()]
	// n != nil when b.callGraph is sound, but the client can
	// pass any callgraph, including an underapproximate one.
	if n == nil {
		return
	}

	for _, e := range n.In {
		if cv, ok := e.Site.(ssa.Value); ok {
			addReturnFlows(b, r, cv)
		}
	}
}

func addReturnFlows(b *builder, r *ssa.Return, site ssa.Value) {
	results := r.Results
	if len(results) == 1 {
		// When there is only one return value, the destination register does not
		// have a tuple type.
		b.addInFlowEdge(b.nodeFromVal(results[0]), b.nodeFromVal(site))
		return
	}

	tup := site.Type().(*types.Tuple)
	for i, r := range results {
		local := indexedLocal{val: site, typ: tup.At(i).Type(), index: i}
		b.addInFlowEdge(b.nodeFromVal(r), local)
	}
}

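// Illustrative sketch, not part of the upstream file (g, x, and err are
// hypothetical): for
//
//	x, err := g() // g returns (io.Reader, error)
//
// the call register has tuple type (io.Reader, error), so addReturnFlows above
// routes g's i-th return register into the indexedLocal node for the call
// site's tuple at index i; the Extract instructions that define x and err then
// read those same indexedLocal nodes (see extract above).
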
func (b *builder) multiconvert(c *ssa.MultiConvert) {
	// TODO(zpavlinovic): decide what to do on MultiConvert long term.
	// TODO(zpavlinovic): add unit tests.
	typeSetOf := func(typ types.Type) []*types.Term {
		// This is an adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on.
		var terms []*types.Term
		var err error
		switch typ := aliases.Unalias(typ).(type) {
		case *types.TypeParam:
			terms, err = typeparams.StructuralTerms(typ)
		case *types.Union:
			terms, err = typeparams.UnionTermSet(typ)
		case *types.Interface:
			terms, err = typeparams.InterfaceTermSet(typ)
		default:
			// Common case.
			// Specializing the len=1 case to avoid a slice
			// had no measurable space/time benefit.
			terms = []*types.Term{types.NewTerm(false, typ)}
		}

		if err != nil {
			return nil
		}
		return terms
	}
	// isValuePreserving returns true if a conversion from ut_src to
	// ut_dst is value-preserving, i.e. just a change of type.
	// Precondition: neither argument is a named or alias type.
	isValuePreserving := func(ut_src, ut_dst types.Type) bool {
		// Identical underlying types?
		if types.IdenticalIgnoreTags(ut_dst, ut_src) {
			return true
		}

		switch ut_dst.(type) {
		case *types.Chan:
			// Conversion between channel types?
			_, ok := ut_src.(*types.Chan)
			return ok

		case *types.Pointer:
			// Conversion between pointers with identical base types?
			_, ok := ut_src.(*types.Pointer)
			return ok
		}
		return false
	}
	dst_terms := typeSetOf(c.Type())
	src_terms := typeSetOf(c.X.Type())
	for _, s := range src_terms {
		us := s.Type().Underlying()
		for _, d := range dst_terms {
			ud := d.Type().Underlying()
			if isValuePreserving(us, ud) {
				// This is equivalent to a ChangeType.
				b.addInFlowAliasEdges(b.nodeFromVal(c), b.nodeFromVal(c.X))
				return
			}
			// This is equivalent to either: SliceToArrayPointer,
			// SliceToArrayPointer+Deref, Size 0 Array constant, or a Convert.
		}
	}
}

// addInFlowEdge adds s -> d to g if d is a node that can have an inflow, i.e., a node
// that represents an interface or an unresolved function value. Otherwise, there
// is no interesting type flow so the edge is omitted.
func (b *builder) addInFlowEdge(s, d node) {
	if hasInFlow(d) {
		b.graph.addEdge(b.representative(s), b.representative(d))
	}
}

// Creates const, pointer, global, func, and local nodes based on register instructions.
func (b *builder) nodeFromVal(val ssa.Value) node {
	if p, ok := aliases.Unalias(val.Type()).(*types.Pointer); ok && !types.IsInterface(p.Elem()) && !isFunction(p.Elem()) {
		// Nested pointers to interfaces are modeled as a special
		// nestedPtrInterface node.
		if i := interfaceUnderPtr(p.Elem()); i != nil {
			return nestedPtrInterface{typ: i}
		}
		// The same goes for nested function types.
		if f := functionUnderPtr(p.Elem()); f != nil {
			return nestedPtrFunction{typ: f}
		}
		return pointer{typ: p}
	}

	switch v := val.(type) {
	case *ssa.Const:
		return constant{typ: val.Type()}
	case *ssa.Global:
		return global{val: v}
	case *ssa.Function:
		return function{f: v}
	case *ssa.Parameter, *ssa.FreeVar, ssa.Instruction:
		// ssa.Param, ssa.FreeVar, and a specific set of "register" instructions,
		// satisfying the ssa.Value interface, can serve as local variables.
		return local{val: v}
	default:
		panic(fmt.Errorf("unsupported value %v in node creation", val))
	}
}

// representative returns a unique representative for node `n`. Since
// semantically equivalent types can have different implementations,
// this method guarantees the same implementation is always used.
func (b *builder) representative(n node) node {
	if n.Type() == nil {
		// panicArg and recoverReturn do not have
		// types and are unique by definition.
		return n
	}
	t := canonicalize(n.Type(), &b.canon)

	switch i := n.(type) {
	case constant:
		return constant{typ: t}
	case pointer:
		return pointer{typ: t.(*types.Pointer)}
	case sliceElem:
		return sliceElem{typ: t}
	case mapKey:
		return mapKey{typ: t}
	case mapValue:
		return mapValue{typ: t}
	case channelElem:
		return channelElem{typ: t}
	case nestedPtrInterface:
		return nestedPtrInterface{typ: t}
	case nestedPtrFunction:
		return nestedPtrFunction{typ: t}
	case field:
		return field{StructType: canonicalize(i.StructType, &b.canon), index: i.index}
	case indexedLocal:
		return indexedLocal{typ: t, val: i.val, index: i.index}
	case local, global, panicArg, recoverReturn, function:
		return n
	default:
		panic(fmt.Errorf("canonicalizing unrecognized node %v", n))
	}
}

// canonicalize returns a type representative of `t` unique subject
// to type map `canon`.
func canonicalize(t types.Type, canon *typeutil.Map) types.Type {
	rep := canon.At(t)
	if rep != nil {
		return rep.(types.Type)
	}
	canon.Set(t, t)
	return t
}

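// Illustrative sketch, not part of the upstream file (ifaceA and ifaceB are
// hypothetical types.Type values describing identical interfaces): node
// structs are compared with ==, so two distinct *types.Interface pointers for
// the same interface would otherwise produce two distinct nodes; canonicalize
// collapses them onto whichever representative was seen first:
//
//	var canon typeutil.Map
//	t1 := canonicalize(ifaceA, &canon) // ifaceA becomes the representative
//	t2 := canonicalize(ifaceB, &canon) // returns ifaceA when the types are identical
//	_ = t1 == t2                       // true in that case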