github.com/graybobo/golang.org-package-offline-cache@v0.0.0-20200626051047-6608995c132f/x/tools/go/ssa/lift14.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.5

package ssa

// This file defines the lifting pass which tries to "lift" Alloc
// cells (new/local variables) into SSA registers, replacing loads
// with the dominating stored value, eliminating loads and stores, and
// inserting φ-nodes as needed.
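//
// For example (schematically; the names and types are illustrative
// only), lifting rewrites
//
//	t0 = local int (x)   ; *Alloc
//	*t0 = 1              ; *Store
//	t1 = *t0             ; load: *UnOp with Op == token.MUL
//
// so that uses of t1 refer directly to the dominating stored value 1,
// and the Alloc, the Store and the load are all deleted.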

// Cited papers and resources:
//
// Ron Cytron et al. 1991. Efficiently computing SSA form...
// http://doi.acm.org/10.1145/115372.115320
//
// Cooper, Harvey, Kennedy.  2001.  A Simple, Fast Dominance Algorithm.
// Software Practice and Experience 2001, 4:1-10.
// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
//
// Daniel Berlin, llvmdev mailing list, 2012.
// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html
// (Be sure to expand the whole thread.)

// TODO(adonovan): opt: there are many optimizations worth evaluating, and
// the conventional wisdom for SSA construction is that a simple
// algorithm well engineered often beats those of better asymptotic
// complexity on all but the most egregious inputs.
//
// Danny Berlin suggests that the Cooper et al. algorithm for
// computing the dominance frontier is superior to Cytron et al.
// Furthermore he recommends that rather than computing the DF for the
// whole function then renaming all alloc cells, it may be cheaper to
// compute the DF for each alloc cell separately and throw it away.
//
// Consider exploiting liveness information to avoid creating dead
// φ-nodes which we then immediately remove.
//
// Integrate lifting with scalar replacement of aggregates (SRA) since
// the two are synergistic.
//
// Also see many other "TODO: opt" suggestions in the code.

import (
	"fmt"
	"go/token"
	"math/big"
	"os"

	"golang.org/x/tools/go/types"
)

// If true, perform sanity checking and show diagnostic information at
// each step of lifting.  Very verbose.
const debugLifting = false

// domFrontier maps each block to the set of blocks in its dominance
// frontier.  The outer slice is conceptually a map keyed by
// Block.Index.  The inner slice is conceptually a set, possibly
// containing duplicates.
//
// TODO(adonovan): opt: measure impact of dups; consider a packed bit
// representation, e.g. big.Int, and bitwise parallel operations for
// the union step in the Children loop.
//
// domFrontier's methods mutate the slice's elements but not its
// length, so their receivers needn't be pointers.
//
type domFrontier [][]*BasicBlock
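
// For example, in the hypothetical diamond CFG b0 -> {b1, b2} -> b3
// (used here only for illustration), b0 dominates every block, so its
// frontier is empty, while DF(b1) = DF(b2) = {b3}: b3 is the first
// block reached along paths that b1 (or b2) does not dominate.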

func (df domFrontier) add(u, v *BasicBlock) {
	p := &df[u.Index]
	*p = append(*p, v)
}

// build builds the dominance frontier df for the dominator (sub)tree
// rooted at u, using the Cytron et al. algorithm.
//
// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA
// by pruning the entire IDF computation, rather than merely pruning
// the DF -> IDF step.
func (df domFrontier) build(u *BasicBlock) {
	// Encounter each node u in postorder of dom tree.
	for _, child := range u.dom.children {
		df.build(child)
	}
	for _, vb := range u.Succs {
		if v := vb.dom; v.idom != u {
			df.add(u, vb)
		}
	}
	for _, w := range u.dom.children {
		for _, vb := range df[w.Index] {
			// TODO(adonovan): opt: use word-parallel bitwise union.
			if v := vb.dom; v.idom != u {
				df.add(u, vb)
			}
		}
	}
}
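
// Schematically, build computes the standard recurrence
//
//	DF(u) = {v ∈ succ(u) | idom(v) ≠ u}                      (DF-local)
//	      ∪ {v ∈ DF(w) | w ∈ children(u), idom(v) ≠ u}       (DF-up)
//
// in a single postorder pass over the dominator tree.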

func buildDomFrontier(fn *Function) domFrontier {
	df := make(domFrontier, len(fn.Blocks))
	df.build(fn.Blocks[0])
	if fn.Recover != nil {
		df.build(fn.Recover)
	}
	return df
}

func removeInstr(refs []Instruction, instr Instruction) []Instruction {
	i := 0
	for _, ref := range refs {
		if ref == instr {
			continue
		}
		refs[i] = ref
		i++
	}
	for j := i; j != len(refs); j++ {
		refs[j] = nil // aid GC
	}
	return refs[:i]
}
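
// For example (hypothetical values), removeInstr([a, i, b, i], i)
// compacts the slice in place to [a, b], nilling out the vacated tail
// so the dropped Instructions can be garbage-collected.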

// lift attempts to replace local and new Allocs accessed only with
// load/store by SSA registers, inserting φ-nodes where necessary.
// The result is a program in classical pruned SSA form.
//
// Preconditions:
// - fn has no dead blocks (blockopt has run).
// - Def/use info (Operands and Referrers) is up-to-date.
// - The dominator tree is up-to-date.
//
func lift(fn *Function) {
	// TODO(adonovan): opt: lots of little optimizations may be
	// worthwhile here, especially if they cause us to avoid
	// buildDomFrontier.  For example:
	//
	// - Alloc never loaded?  Eliminate.
	// - Alloc never stored?  Replace all loads with a zero constant.
	// - Alloc stored once?  Replace loads with dominating store;
	//   don't forget that an Alloc is itself an effective store
	//   of zero.
	// - Alloc used only within a single block?
	//   Use degenerate algorithm avoiding φ-nodes.
	// - Consider synergy with scalar replacement of aggregates (SRA).
	//   e.g. *(&x.f) where x is an Alloc.
	//   Perhaps we'd get better results if we generated this as x.f
	//   i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)).
	//   Unclear.
	//
	// But we will start with the simplest correct code.
	df := buildDomFrontier(fn)

	if debugLifting {
		title := false
		for i, blocks := range df {
			if blocks != nil {
				if !title {
					fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn)
					title = true
				}
				fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks)
			}
		}
	}

	newPhis := make(newPhiMap)

	// During this pass we will replace some BasicBlock.Instrs
	// (allocs, loads and stores) with nil, keeping a count in
	// BasicBlock.gaps.  At the end we will reset Instrs to the
	// concatenation of all non-dead newPhis and non-nil Instrs
	// for the block, reusing the original array if space permits.

	// While we're here, we also eliminate 'rundefers'
	// instructions in functions that contain no 'defer'
	// instructions.
	usesDefer := false

	// Determine which allocs we can lift and number them densely.
	// The renaming phase uses this numbering for compact maps.
	numAllocs := 0
	for _, b := range fn.Blocks {
		b.gaps = 0
		b.rundefers = 0
		for _, instr := range b.Instrs {
			switch instr := instr.(type) {
			case *Alloc:
				index := -1
				if liftAlloc(df, instr, newPhis) {
					index = numAllocs
					numAllocs++
				}
				instr.index = index
			case *Defer:
				usesDefer = true
			case *RunDefers:
				b.rundefers++
			}
		}
	}

	// renaming maps an alloc (keyed by index) to its replacement
	// value.  Initially the renaming contains nil, signifying the
	// zero constant of the appropriate type; we construct the
	// Const lazily at most once on each path through the domtree.
	// TODO(adonovan): opt: cache per-function not per subtree.
	renaming := make([]Value, numAllocs)

	// Renaming.
	rename(fn.Blocks[0], renaming, newPhis)

	// Eliminate dead new phis, then prepend the live ones to each block.
	for _, b := range fn.Blocks {

		// Compress the newPhis slice to eliminate unused phis.
		// TODO(adonovan): opt: compute liveness to avoid
		// placing phis in blocks for which the alloc cell is
		// not live.
		nps := newPhis[b]
		j := 0
		for _, np := range nps {
			if !phiIsLive(np.phi) {
				// discard it, first removing it from referrers
				for _, newval := range np.phi.Edges {
					if refs := newval.Referrers(); refs != nil {
						*refs = removeInstr(*refs, np.phi)
					}
				}
				continue
			}
			nps[j] = np
			j++
		}
		nps = nps[:j]

		rundefersToKill := b.rundefers
		if usesDefer {
			rundefersToKill = 0
		}

		if j+b.gaps+rundefersToKill == 0 {
			continue // fast path: no new phis or gaps
		}

		// Compact nps + non-nil Instrs into a new slice.
		// TODO(adonovan): opt: compact in situ if there is
		// sufficient space or slack in the slice.
		dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill)
		for i, np := range nps {
			dst[i] = np.phi
		}
		for _, instr := range b.Instrs {
			if instr == nil {
				continue
			}
			if !usesDefer {
				if _, ok := instr.(*RunDefers); ok {
					continue
				}
			}
			dst[j] = instr
			j++
		}
		b.Instrs = dst
	}

	// Remove any fn.Locals that were lifted.
	j := 0
	for _, l := range fn.Locals {
		if l.index < 0 {
			fn.Locals[j] = l
			j++
		}
	}
	// Nil out fn.Locals[j:] to aid GC.
	for i := j; i < len(fn.Locals); i++ {
		fn.Locals[i] = nil
	}
	fn.Locals = fn.Locals[:j]
}

func phiIsLive(phi *Phi) bool {
	for _, instr := range *phi.Referrers() {
		if instr == phi {
			continue // self-refs don't count
		}
		if _, ok := instr.(*DebugRef); ok {
			continue // debug refs don't count
		}
		return true
	}
	return false
}

type blockSet struct{ big.Int } // (inherit methods from Int)

// add adds b to the set and returns true if the set changed.
func (s *blockSet) add(b *BasicBlock) bool {
	i := b.Index
	if s.Bit(i) != 0 {
		return false
	}
	s.SetBit(&s.Int, i, 1)
	return true
}

// take removes an arbitrary element from a set s and
// returns its index, or returns -1 if empty.
func (s *blockSet) take() int {
	l := s.BitLen()
	for i := 0; i < l; i++ {
		if s.Bit(i) == 1 {
			s.SetBit(&s.Int, i, 0)
			return i
		}
	}
	return -1
}
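
// For example (hypothetical indices): after add-ing blocks 3 and 7,
// successive take calls return 3, then 7 (the lowest set bit each
// time), then -1 once the set is empty.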

// newPhi is a pair of a newly introduced φ-node and the lifted Alloc
// it replaces.
type newPhi struct {
	phi   *Phi
	alloc *Alloc
}

// newPhiMap records for each basic block, the set of newPhis that
// must be prepended to the block.
type newPhiMap map[*BasicBlock][]newPhi

// liftAlloc determines whether alloc can be lifted into registers,
// and if so, it populates newPhis with all the φ-nodes it may require
// and returns true.
//
func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap) bool {
	// Don't lift aggregates into registers, because we don't have
	// a way to express their zero-constants.
	switch deref(alloc.Type()).Underlying().(type) {
	case *types.Array, *types.Struct:
		return false
	}

	// Don't lift named return values in functions that defer
	// calls that may recover from panic.
	if fn := alloc.Parent(); fn.Recover != nil {
		for _, nr := range fn.namedResults {
			if nr == alloc {
				return false
			}
		}
	}

	// Compute defblocks, the set of blocks containing a
	// definition of the alloc cell.
	var defblocks blockSet
	for _, instr := range *alloc.Referrers() {
		// Bail out if we discover the alloc is not liftable;
		// the only operations permitted to use the alloc are
		// loads/stores into the cell, and DebugRef.
		switch instr := instr.(type) {
		case *Store:
			if instr.Val == alloc {
				return false // address used as value
			}
			if instr.Addr != alloc {
				panic("Alloc.Referrers is inconsistent")
			}
			defblocks.add(instr.Block())
		case *UnOp:
			if instr.Op != token.MUL {
				return false // not a load
			}
			if instr.X != alloc {
				panic("Alloc.Referrers is inconsistent")
			}
		case *DebugRef:
			// ok
		default:
			return false // some other instruction
		}
	}
	// The Alloc itself counts as a (zero) definition of the cell.
	defblocks.add(alloc.Block())

	if debugLifting {
		fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name())
	}

	fn := alloc.Parent()

	// Φ-insertion.
	//
	// What follows is the body of the main loop of the insert-φ
	// function described by Cytron et al, but instead of using
	// counter tricks, we just reset the 'hasAlready' and 'work'
	// sets each iteration.  These are bitmaps so it's pretty cheap.
	//
	// TODO(adonovan): opt: recycle slice storage for W,
	// hasAlready, defBlocks across liftAlloc calls.
	var hasAlready blockSet

	// Initialize W and work to defblocks.
	var work blockSet = defblocks // blocks seen
	var W blockSet                // blocks to do
	W.Set(&defblocks.Int)

	// Traverse iterated dominance frontier, inserting φ-nodes.
	for i := W.take(); i != -1; i = W.take() {
		u := fn.Blocks[i]
		for _, v := range df[u.Index] {
			if hasAlready.add(v) {
				// Create φ-node.
				// It will be prepended to v.Instrs later, if needed.
				phi := &Phi{
					Edges:   make([]Value, len(v.Preds)),
					Comment: alloc.Comment,
				}
				phi.pos = alloc.Pos()
				phi.setType(deref(alloc.Type()))
				phi.block = v
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v)
				}
				newPhis[v] = append(newPhis[v], newPhi{phi, alloc})

				if work.add(v) {
					W.add(v)
				}
			}
		}
	}

	return true
}
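
// To illustrate (hypothetical diamond CFG b0 -> {b1, b2} -> b3): if
// the cell is stored in b1 and b2, then defblocks = {b0, b1, b2},
// since the Alloc in b0 itself counts as a zero store.  The dominance
// frontier of b1 and b2 is {b3}, so exactly one φ-node is placed at
// b3; b3's own frontier is empty, so the worklist then terminates.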

// replaceAll replaces all intraprocedural uses of x with y,
// updating x.Referrers and y.Referrers.
// Precondition: x.Referrers() != nil, i.e. x must be local to some function.
//
func replaceAll(x, y Value) {
	var rands []*Value
	pxrefs := x.Referrers()
	pyrefs := y.Referrers()
	for _, instr := range *pxrefs {
		rands = instr.Operands(rands[:0]) // recycle storage
		for _, rand := range rands {
			if *rand != nil {
				if *rand == x {
					*rand = y
				}
			}
		}
		if pyrefs != nil {
			*pyrefs = append(*pyrefs, instr) // dups ok
		}
	}
	*pxrefs = nil // x is now unreferenced
}
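
// For example (hypothetical values): if t1 = *t0 is a lifted load
// whose dominating stored value is t2, replaceAll(t1, t2) rewrites
// every operand slot holding t1 to t2 and appends each such user to
// t2's referrers, leaving t1 unreferenced and ready for deletion.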

// renamed returns the value to which alloc is being renamed,
// constructing it lazily if it's the implicit zero initialization.
//
func renamed(renaming []Value, alloc *Alloc) Value {
	v := renaming[alloc.index]
	if v == nil {
		v = zeroConst(deref(alloc.Type()))
		renaming[alloc.index] = v
	}
	return v
}
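
// For example, the first load of a never-stored cell on some domtree
// path constructs the zero Const once and records it in renaming;
// subsequent loads on that path reuse the same Const.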

// rename implements the (Cytron et al) SSA renaming algorithm, a
// preorder traversal of the dominator tree replacing all loads of
// Alloc cells with the value stored to that cell by the dominating
// store instruction.  For lifting, we need only consider loads,
// stores and φ-nodes.
//
// renaming is a map from *Alloc (keyed by index number) to its
// dominating stored value; newPhis[x] is the set of new φ-nodes to be
// prepended to block x.
//
func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) {
	// Each φ-node becomes the new name for its associated Alloc.
	for _, np := range newPhis[u] {
		phi := np.phi
		alloc := np.alloc
		renaming[alloc.index] = phi
	}

	// Rename loads and stores of allocs.
	for i, instr := range u.Instrs {
		switch instr := instr.(type) {
		case *Alloc:
			if instr.index >= 0 { // store of zero to Alloc cell
				// Replace dominated loads by the zero value.
				renaming[instr.index] = nil
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr)
				}
				// Delete the Alloc.
				u.Instrs[i] = nil
				u.gaps++
			}

		case *Store:
			if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell
				// Replace dominated loads by the stored value.
				renaming[alloc.index] = instr.Val
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n",
						instr, instr.Val.Name())
				}
				// Remove the store from the referrer list of the stored value.
				if refs := instr.Val.Referrers(); refs != nil {
					*refs = removeInstr(*refs, instr)
				}
				// Delete the Store.
				u.Instrs[i] = nil
				u.gaps++
			}

		case *UnOp:
			if instr.Op == token.MUL {
				if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell
					newval := renamed(renaming, alloc)
					if debugLifting {
						fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n",
							instr.Name(), instr, newval.Name())
					}
					// Replace all references to
					// the loaded value by the
					// dominating stored value.
					replaceAll(instr, newval)
					// Delete the Load.
					u.Instrs[i] = nil
					u.gaps++
				}
			}

		case *DebugRef:
			if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell
				if instr.IsAddr {
					instr.X = renamed(renaming, alloc)
					instr.IsAddr = false

					// Add DebugRef to instr.X's referrers.
					if refs := instr.X.Referrers(); refs != nil {
						*refs = append(*refs, instr)
					}
				} else {
					// A source expression denotes the address
					// of an Alloc that was optimized away.
					instr.X = nil

					// Delete the DebugRef.
					u.Instrs[i] = nil
					u.gaps++
				}
			}
		}
	}

	// For each φ-node in a CFG successor, rename the edge.
	for _, v := range u.Succs {
		phis := newPhis[v]
		if len(phis) == 0 {
			continue
		}
		i := v.predIndex(u)
		for _, np := range phis {
			phi := np.phi
			alloc := np.alloc
			newval := renamed(renaming, alloc)
			if debugLifting {
				fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n",
					phi.Name(), u, v, i, alloc.Name(), newval.Name())
			}
			phi.Edges[i] = newval
			if prefs := newval.Referrers(); prefs != nil {
				*prefs = append(*prefs, phi)
			}
		}
	}

	// Continue depth-first recursion over domtree, pushing a
	// fresh copy of the renaming map for each subtree.
	for _, v := range u.dom.children {
		// TODO(adonovan): opt: avoid copy on final iteration; use destructive update.
		r := make([]Value, len(renaming))
		copy(r, renaming)
		rename(v, r, newPhis)
	}
}
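
// Worked example (hypothetical, for illustration): in the diamond
// b0 -> {b1, b2} -> b3 with one lifted cell x, suppose b1 stores 1
// and b2 stores 2.  rename(b0) leaves x mapped to nil (the lazy
// zero); the recursive calls for b1 and b2 each receive a fresh copy
// of the renaming, record 1 and 2 respectively, and fill the
// corresponding edges of the φ-node prepended to b3; within b3, the
// φ-node becomes x's name, so loads of x there are replaced by it.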