github.com/karrick/go@v0.0.0-20170817181416-d5b0ec858b37/src/cmd/compile/internal/ssa/rewrite.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
)

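// applyRewrite repeatedly applies the block rewriter rb and the value
// rewriter rv to f until neither reports any further change, then removes
// the values that were marked OpInvalid along the way.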
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
	// repeat rewrites until we find no more rewrites
	for {
		change := false
		for _, b := range f.Blocks {
			if b.Control != nil && b.Control.Op == OpCopy {
				for b.Control.Op == OpCopy {
					b.SetControl(b.Control.Args[0])
				}
			}
			if rb(b) {
				change = true
			}
			for _, v := range b.Values {
				change = phielimValue(v) || change

				// Eliminate copy inputs.
				// If any copy input becomes unused, mark it
				// as invalid and discard its argument. Repeat
				// recursively on the discarded argument.
				// This phase helps remove phantom "dead copy" uses
				// of a value so that an x.Uses==1 rule condition
				// fires reliably.
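				// For example, given
				//   a = Copy w
				//   v = Add a y
				// the loop below rewrites v to Add w y; if a then
				// has no remaining uses, it is reset to OpInvalid
				// and its own argument is released the same way.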
				for i, a := range v.Args {
					if a.Op != OpCopy {
						continue
					}
					v.SetArg(i, copySource(a))
					change = true
					for a.Uses == 0 {
						b := a.Args[0]
						a.reset(OpInvalid)
						a = b
					}
				}

				// apply rewrite function
				if rv(v) {
					change = true
				}
			}
		}
		if !change {
			break
		}
	}
	// remove clobbered values
	for _, b := range f.Blocks {
		j := 0
		for i, v := range b.Values {
			if v.Op == OpInvalid {
				f.freeValue(v)
				continue
			}
			if i != j {
				b.Values[j] = v
			}
			j++
		}
		if j != len(b.Values) {
			tail := b.Values[j:]
			for j := range tail {
				tail[j] = nil
			}
			b.Values = b.Values[:j]
		}
	}
}

// Common functions called from rewriting rules

func is64BitFloat(t *types.Type) bool {
	return t.Size() == 8 && t.IsFloat()
}

func is32BitFloat(t *types.Type) bool {
	return t.Size() == 4 && t.IsFloat()
}

func is64BitInt(t *types.Type) bool {
	return t.Size() == 8 && t.IsInteger()
}

func is32BitInt(t *types.Type) bool {
	return t.Size() == 4 && t.IsInteger()
}

func is16BitInt(t *types.Type) bool {
	return t.Size() == 2 && t.IsInteger()
}

func is8BitInt(t *types.Type) bool {
	return t.Size() == 1 && t.IsInteger()
}

func isPtr(t *types.Type) bool {
	return t.IsPtrShaped()
}

func isSigned(t *types.Type) bool {
	return t.IsSigned()
}

func typeSize(t *types.Type) int64 {
	return t.Size()
}

// mergeSym merges two symbolic offsets. There is no real merging of
// offsets; we just pick the non-nil one.
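// For example, mergeSym(nil, s) == s and mergeSym(s, nil) == s;
// merging two non-nil syms panics.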
func mergeSym(x, y interface{}) interface{} {
	if x == nil {
		return y
	}
	if y == nil {
		return x
	}
	panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
}
func canMergeSym(x, y interface{}) bool {
	return x == nil || y == nil
}

// canMergeLoad reports whether the load can be merged into target without
// invalidating the schedule.
// It also checks that the other non-load argument x is something we
// are ok with clobbering (all our current load+op instructions clobber
// their input register).
func canMergeLoad(target, load, x *Value) bool {
	if target.Block.ID != load.Block.ID {
		// If the load is in a different block do not merge it.
		return false
	}

	// We can't merge the load into the target if the load
	// has more than one use.
	if load.Uses != 1 {
		return false
	}

	// The register containing x is going to get clobbered.
	// Don't merge if we still need the value of x.
	// We don't have liveness information here, but we can
	// approximate x dying with:
	//  1) target is x's only use.
	//  2) target is not in a deeper loop than x.
	if x.Uses != 1 {
		return false
	}
	loopnest := x.Block.Func.loopnest()
	loopnest.calculateDepths()
	if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
		return false
	}

	mem := load.MemoryArg()

	// We need the load's memory arg to still be alive at target. That
	// can't be the case if one of target's args depends on a memory
	// state that is a successor of load's memory arg.
	//
	// For example, it would be invalid to merge load into target in
	// the following situation because newmem has killed oldmem
	// before target is reached:
	//     load = read ... oldmem
	//   newmem = write ... oldmem
	//     arg0 = read ... newmem
	//   target = add arg0 load
	//
	// If the argument comes from a different block then we can exclude
	// it immediately because it must dominate load (which is in the
	// same block as target).
	var args []*Value
	for _, a := range target.Args {
		if a != load && a.Block.ID == target.Block.ID {
			args = append(args, a)
		}
	}

	// memPreds contains memory states known to be predecessors of load's
	// memory state. It is lazily initialized.
	var memPreds map[*Value]bool
search:
	for i := 0; len(args) > 0; i++ {
		const limit = 100
		if i >= limit {
			// Give up if we have done a lot of iterations.
			return false
		}
		v := args[len(args)-1]
		args = args[:len(args)-1]
		if target.Block.ID != v.Block.ID {
			// Since target and load are in the same block
			// we can stop searching when we leave the block.
			continue search
		}
		if v.Op == OpPhi {
			// A Phi implies we have reached the top of the block.
			// The memory phi, if it exists, is always
			// the first logical store in the block.
			continue search
		}
		if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
			// We could handle this situation, but it is likely
			// to be very rare.
			return false
		}
		if v.Type.IsMemory() {
			if memPreds == nil {
				// Initialise a map containing memory states
				// known to be predecessors of load's memory
				// state.
				memPreds = make(map[*Value]bool)
				m := mem
				const limit = 50
				for i := 0; i < limit; i++ {
					if m.Op == OpPhi {
						// The memory phi, if it exists, is always
						// the first logical store in the block.
						break
					}
					if m.Block.ID != target.Block.ID {
						break
					}
					if !m.Type.IsMemory() {
						break
					}
					memPreds[m] = true
					if len(m.Args) == 0 {
						break
					}
					m = m.MemoryArg()
				}
			}

			// We can merge if v is a predecessor of mem.
			//
			// For example, we can merge load into target in the
			// following scenario:
			//      x = read ... v
			//    mem = write ... v
			//   load = read ... mem
			// target = add x load
			if memPreds[v] {
				continue search
			}
			return false
		}
		if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
			// If v takes mem as an input then we know mem
			// is valid at this point.
			continue search
		}
		for _, a := range v.Args {
			if target.Block.ID == a.Block.ID {
				args = append(args, a)
			}
		}
	}

	return true
}

// isArg returns whether s is an arg symbol
func isArg(s interface{}) bool {
	_, ok := s.(*ArgSymbol)
	return ok
}

// isAuto returns whether s is an auto symbol
func isAuto(s interface{}) bool {
	_, ok := s.(*AutoSymbol)
	return ok
}

// isSameSym returns whether sym is the same as the given named symbol
func isSameSym(sym interface{}, name string) bool {
	s, ok := sym.(fmt.Stringer)
	return ok && s.String() == name
}

// nlz returns the number of leading zeros.
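// For example, nlz(1) == 63, nlz(-1) == 0 and nlz(0) == 64.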
func nlz(x int64) int64 {
	// log2(0) == -1, so nlz(0) == 64
	return 63 - log2(x)
}

// ntz returns the number of trailing zeros.
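// For example, ntz(8) == 3 and ntz(0) == 64.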
func ntz(x int64) int64 {
	return 64 - nlz(^x&(x-1))
}

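// oneBit reports whether x contains exactly one set bit.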
func oneBit(x int64) bool {
	return nlz(x)+ntz(x) == 63
}

// nlo returns the number of leading ones.
func nlo(x int64) int64 {
	return nlz(^x)
}

// nto returns the number of trailing ones.
func nto(x int64) int64 {
	return ntz(^x)
}

// log2 returns logarithm in base 2 of uint64(n), with log2(0) = -1.
// Rounds down.
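// For example, log2(1) == 0, log2(8) == 3 and log2(10) == 3.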
func log2(n int64) (l int64) {
	l = -1
	x := uint64(n)
	for ; x >= 0x8000; x >>= 16 {
		l += 16
	}
	if x >= 0x80 {
		x >>= 8
		l += 8
	}
	if x >= 0x8 {
		x >>= 4
		l += 4
	}
	if x >= 0x2 {
		x >>= 2
		l += 2
	}
	if x >= 0x1 {
		l++
	}
	return
}

// isPowerOfTwo reports whether n is a power of 2.
func isPowerOfTwo(n int64) bool {
	return n > 0 && n&(n-1) == 0
}

// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

// is16Bit reports whether n can be represented as a signed 16 bit integer.
func is16Bit(n int64) bool {
	return n == int64(int16(n))
}

// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
func isU12Bit(n int64) bool {
	return 0 <= n && n < (1<<12)
}

// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
func isU16Bit(n int64) bool {
	return n == int64(uint16(n))
}

// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
func isU32Bit(n int64) bool {
	return n == int64(uint32(n))
}

// is20Bit reports whether n can be represented as a signed 20 bit integer.
func is20Bit(n int64) bool {
	return -(1<<19) <= n && n < (1<<19)
}

// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
func b2i(b bool) int64 {
	if b {
		return 1
	}
	return 0
}

// i2f is used in rules for converting from an AuxInt to a float.
func i2f(i int64) float64 {
	return math.Float64frombits(uint64(i))
}

// i2f32 is used in rules for converting from an AuxInt to a float32.
func i2f32(i int64) float32 {
	return float32(math.Float64frombits(uint64(i)))
}

// f2i is used in the rules for storing a float in AuxInt.
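// For example, f2i(1.0) == 0x3ff0000000000000; f2i and i2f are pure bit
// conversions, so the stored bit pattern round-trips exactly.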
func f2i(f float64) int64 {
	return int64(math.Float64bits(f))
}

// uaddOvf returns true if unsigned a+b would overflow.
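// For example, uaddOvf(-1, 1) is true, since uint64(-1) is the maximum
// uint64 value and adding 1 wraps around to 0.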
func uaddOvf(a, b int64) bool {
	return uint64(a)+uint64(b) < uint64(a)
}

// devirt de-virtualizes an InterCall.
// 'sym' is the symbol for the itab.
func devirt(v *Value, sym interface{}, offset int64) *obj.LSym {
	f := v.Block.Func
	ext, ok := sym.(*ExternSymbol)
	if !ok {
		return nil
	}
	lsym := f.fe.DerefItab(ext.Sym, offset)
	if f.pass.debug > 0 {
		if lsym != nil {
			f.Warnl(v.Pos, "de-virtualizing call")
		} else {
			f.Warnl(v.Pos, "couldn't de-virtualize call")
		}
	}
	return lsym
}

// isSamePtr reports whether p1 and p2 point to the same address.
func isSamePtr(p1, p2 *Value) bool {
	if p1 == p2 {
		return true
	}
	if p1.Op != p2.Op {
		return false
	}
	switch p1.Op {
	case OpOffPtr:
		return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
	case OpAddr:
		// OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
		// Checking for value equality only works after [z]cse has run.
		return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
	case OpAddPtr:
		return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
	}
	return false
}

// moveSize returns the number of bytes an aligned MOV instruction moves
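// For example, moveSize(8, c) is 8 on a target with 8-byte pointers,
// while moveSize(2, c) is 2 on any target.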
func moveSize(align int64, c *Config) int64 {
	switch {
	case align%8 == 0 && c.PtrSize == 8:
		return 8
	case align%4 == 0:
		return 4
	case align%2 == 0:
		return 2
	}
	return 1
}

// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
func mergePoint(b *Block, a ...*Value) *Block {
	// Walk backward from b looking for one of the a's blocks.

	// Max distance
	d := 100

	for d > 0 {
		for _, x := range a {
			if b == x.Block {
				goto found
			}
		}
		if len(b.Preds) > 1 {
			// Don't know which way to go back. Abort.
			return nil
		}
		b = b.Preds[0].b
		d--
	}
	return nil // too far away
found:
	// At this point, b is the block of the first value in a that we found
	// by walking backward. If we return anything, it will be this block, r.
	r := b

	// Keep going, counting the other a's that we find. They must all dominate r.
	na := 0
	for d > 0 {
		for _, x := range a {
			if b == x.Block {
				na++
			}
		}
		if na == len(a) {
			// Found all of a in a backwards walk. We can return r.
			return r
		}
		if len(b.Preds) > 1 {
			return nil
		}
		b = b.Preds[0].b
		d--

	}
	return nil // too far away
}

// clobber invalidates v.  Returns true.
// clobber is used by rewrite rules to:
//   A) make sure v is really dead and never used again.
//   B) decrement use counts of v's args.
func clobber(v *Value) bool {
	v.reset(OpInvalid)
	// Note: leave v.Block intact.  The Block field is used after clobber.
	return true
}

// noteRule is an easy way to track if a rule is matched when writing
// new ones.  Make the rule of interest also conditional on
//     noteRule("note to self: rule of interest matched")
// and that message will print when the rule matches.
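// For instance, a hypothetical rule in a .rules file might read:
//     (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) && noteRule("fold") -> (ADDQconst [c+d] x)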
func noteRule(s string) bool {
	fmt.Println(s)
	return true
}

// warnRule generates a compiler debug output with string s when
// cond is true and the rule is fired.
func warnRule(cond bool, v *Value, s string) bool {
	if cond {
		v.Block.Func.Warnl(v.Pos, s)
	}
	return true
}

// logRule logs the use of the rule s. This will only be enabled if
// rewrite rules were generated with the -log option, see gen/rulegen.go.
func logRule(s string) {
	if ruleFile == nil {
		// Open a log file to write log to. We open in append
		// mode because all.bash runs the compiler lots of times,
		// and we want the concatenation of all of those logs.
		// This means, of course, that users need to rm the old log
		// to get fresh data.
		// TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
		w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
		if err != nil {
			panic(err)
		}
		ruleFile = w
	}
	_, err := fmt.Fprintf(ruleFile, "rewrite %s\n", s)
	if err != nil {
		panic(err)
	}
}

var ruleFile io.Writer

func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

func isConstZero(v *Value) bool {
	switch v.Op {
	case OpConstNil:
		return true
	case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
		return v.AuxInt == 0
	}
	return false
}

// reciprocalExact64 reports whether 1/c is exactly representable.
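// For example, reciprocalExact64(4) is true (1/4 == 0.25 exactly),
// while reciprocalExact64(3) is false.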
func reciprocalExact64(c float64) bool {
	b := math.Float64bits(c)
	man := b & (1<<52 - 1)
	if man != 0 {
		return false // not a power of 2, denormal, or NaN
	}
	exp := b >> 52 & (1<<11 - 1)
	// exponent bias is 0x3ff.  So taking the reciprocal of a number
	// changes the exponent to 0x7fe-exp.
	switch exp {
	case 0:
		return false // ±0
	case 0x7ff:
		return false // ±inf
	case 0x7fe:
		return false // exponent is not representable
	default:
		return true
	}
}

// reciprocalExact32 reports whether 1/c is exactly representable.
func reciprocalExact32(c float32) bool {
	b := math.Float32bits(c)
	man := b & (1<<23 - 1)
	if man != 0 {
		return false // not a power of 2, denormal, or NaN
	}
	exp := b >> 23 & (1<<8 - 1)
	// exponent bias is 0x7f.  So taking the reciprocal of a number
	// changes the exponent to 0xfe-exp.
	switch exp {
	case 0:
		return false // ±0
	case 0xff:
		return false // ±inf
	case 0xfe:
		return false // exponent is not representable
	default:
		return true
	}
}

// isARMImmRot reports whether v can be encoded directly as an ARM instruction
// immediate: an 8-bit value rotated right by an even number of bits.
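// For example, 0xff000000 is encodable (0xff rotated right by 8),
// while 0x101 is not.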
func isARMImmRot(v uint32) bool {
	for i := 0; i < 16; i++ {
		if v&^0xff == 0 {
			return true
		}
		v = v<<2 | v>>30
	}

	return false
}

// overlap reports whether the ranges given by the given offset and
// size pairs overlap.
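// For example, overlap(0, 4, 2, 4) is true, while overlap(0, 4, 4, 4) is false.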
func overlap(offset1, size1, offset2, size2 int64) bool {
	if offset1 >= offset2 && offset2+size2 > offset1 {
		return true
	}
	if offset2 >= offset1 && offset1+size1 > offset2 {
		return true
	}
	return false
}