github.com/go-asm/go@v1.21.1-0.20240213172139-40c5ead50c48/cmd/compile/ssa/deadstore.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"github.com/go-asm/go/cmd/compile/ir"
	"github.com/go-asm/go/cmd/compile/types"
)

// dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by
// another store to the same location, with no intervening load.
// This implementation only works within a basic block. TODO: use something more global.
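//
// For example, given (schematically)
//
//	v1 = Store {t} ptr x v0
//	v2 = Store {t} ptr y v1
//
// in a single block with no load of v1, the first store is dead: it is
// rewritten in place to a Copy of its memory argument v0, and later
// passes clean it up.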
func dse(f *Func) {
	var stores []*Value
	loadUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(loadUse)
	storeUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(storeUse)
	shadowed := f.newSparseMap(f.NumValues())
	defer f.retSparseMap(shadowed)
	for _, b := range f.Blocks {
		// Find all the stores in this block. Categorize their uses:
		//  loadUse contains stores which are used by a subsequent load.
		//  storeUse contains stores which are used by a subsequent store.
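		// Illustratively, for v2 = Store ptr y v1, the memory arg v1 is
		// added to storeUse; for v3 = Load ptr v2, v2 is added to loadUse.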
		loadUse.clear()
		storeUse.clear()
		stores = stores[:0]
		for _, v := range b.Values {
			if v.Op == OpPhi {
				// Ignore phis - they will always be first and can't be eliminated
				continue
			}
			if v.Type.IsMemory() {
				stores = append(stores, v)
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						storeUse.add(a.ID)
						if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef {
							// CALL, DUFFCOPY, etc. both read
							// and write memory.
							loadUse.add(a.ID)
						}
					}
				}
			} else {
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						loadUse.add(a.ID)
					}
				}
			}
		}
		if len(stores) == 0 {
			continue
		}

		// find last store in the block
		var last *Value
		for _, v := range stores {
			if storeUse.contains(v.ID) {
				continue
			}
			if last != nil {
				b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString())
			}
			last = v
		}
		if last == nil {
			b.Fatalf("no last store found - cycle?")
		}

		// Walk backwards looking for dead stores. Keep track of shadowed addresses.
		// A "shadowed address" is a pointer, offset, and size describing a memory region that
		// is known to be written. We keep track of shadowed addresses in the shadowed map,
		// mapping the ID of the address to a shadowRange where future writes will happen.
		// Since we're walking backwards, writes to a shadowed region are useless,
		// as they will be immediately overwritten.
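		// For example, if the walk has already seen a Zero of ptr[0:16],
		// an earlier Store covering only ptr[4:12] writes entirely inside
		// the shadowed range and is dead.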
		shadowed.clear()
		v := last

	walkloop:
		if loadUse.contains(v.ID) {
			// Someone might be reading this memory state.
			// Clear all shadowed addresses.
			shadowed.clear()
		}
		if v.Op == OpStore || v.Op == OpZero {
			ptr := v.Args[0]
			var off int64
			for ptr.Op == OpOffPtr { // Walk to base pointer
				off += ptr.AuxInt
				ptr = ptr.Args[0]
			}
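			// E.g. an OffPtr [8] of an OffPtr [4] of base resolves to
			// ptr = base with off = 12.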
			var sz int64
			if v.Op == OpStore {
				sz = v.Aux.(*types.Type).Size()
			} else { // OpZero
				sz = v.AuxInt
			}
			sr := shadowRange(shadowed.get(ptr.ID))
			if sr.contains(off, off+sz) {
				// Modify the store/zero into a copy of the memory state,
				// effectively eliding the store operation.
				if v.Op == OpStore {
					// store addr value mem
					v.SetArgs1(v.Args[2])
				} else {
					// zero addr mem
					v.SetArgs1(v.Args[1])
				}
				v.Aux = nil
				v.AuxInt = 0
				v.Op = OpCopy
			} else {
				// Extend shadowed region.
				shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
			}
		}
		// walk to previous store
		if v.Op == OpPhi {
			// At start of block.  Move on to next block.
			// The memory phi, if it exists, is always
			// the first logical store in the block.
			// (Even if it isn't the first in the current b.Values order.)
			continue
		}
		for _, a := range v.Args {
			if a.Block == b && a.Type.IsMemory() {
				v = a
				goto walkloop
			}
		}
	}
}

// A shadowRange encodes a set of byte offsets [lo():hi()] from
// a given pointer that will be written to later in the block.
// A zero shadowRange encodes an empty shadowed range (and so
// does a -1 shadowRange, which is what sparsemap.get returns
// on a failed lookup).
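//
// For example, the range [8:16] is stored as 8 + 16<<16 = 0x00100008,
// so lo() reports 8 and hi() reports 16; each bound must fit in 16 bits.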
type shadowRange int32

func (sr shadowRange) lo() int64 {
	return int64(sr & 0xffff)
}
func (sr shadowRange) hi() int64 {
	return int64((sr >> 16) & 0xffff)
}

// contains reports whether [lo:hi] is completely within sr.
func (sr shadowRange) contains(lo, hi int64) bool {
	return lo >= sr.lo() && hi <= sr.hi()
}

// merge returns the union of sr and [lo:hi].
// merge is allowed to return something smaller than the union.
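// For example, [0:8].merge(4, 12) returns [0:12], since the two ranges
// overlap, while [0:8].merge(16, 24) returns [0:8]: only one range can
// be tracked, the regions are disjoint, and ties keep the old range.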
func (sr shadowRange) merge(lo, hi int64) shadowRange {
	if lo < 0 || hi > 0xffff {
		// Ignore offsets that are too large or small.
		return sr
	}
	if sr.lo() == sr.hi() {
		// Old range is empty - use new one.
		return shadowRange(lo + hi<<16)
	}
	if hi < sr.lo() || lo > sr.hi() {
		// The two regions don't overlap or abut, so we would
		// have to keep track of multiple disjoint ranges.
		// Because we can only keep one, keep the larger one.
		if sr.hi()-sr.lo() >= hi-lo {
			return sr
		}
		return shadowRange(lo + hi<<16)
	}
	// Regions overlap or abut - compute the union.
	return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
}

// elimDeadAutosGeneric deletes autos that are never accessed. To achieve
// this we track the operations that the address of each auto reaches and,
// if it only reaches stores, we delete all the stores. The other
// operations will then be eliminated by the dead code elimination pass.
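//
// For example (schematically), if the only ops that an auto x reaches are
//
//	v1 = LocalAddr {x} sp mem
//	v2 = Store {t} v1 y mem
//
// then x is never read: the store is rewritten to a Copy of its memory
// argument, and v1 is left for dead code elimination.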
func elimDeadAutosGeneric(f *Func) {
	addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
	elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
	var used ir.NameSet               // used autos that must be kept

	// visit the value and report whether any of the maps are updated
	visit := func(v *Value) (changed bool) {
		args := v.Args
		switch v.Op {
		case OpAddr, OpLocalAddr:
			// Propagate the address if it points to an auto.
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if addr[v] == nil {
				addr[v] = n
				changed = true
			}
			return
		case OpVarDef:
			// v should be eliminated if we eliminate the auto.
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if elim[v] == nil {
				elim[v] = n
				changed = true
			}
			return
		case OpVarLive:
			// Don't delete the auto if it needs to be kept alive.

			// We depend on this check to keep the autotmp stack slots
			// for open-coded defers from being removed (since they
			// may not be used by the inline code, but will be used by
			// panic processing).
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if !used.Has(n) {
				used.Add(n)
				changed = true
			}
			return
		case OpStore, OpMove, OpZero:
			// v should be eliminated if we eliminate the auto.
			n, ok := addr[args[0]]
			if ok && elim[v] == nil {
				elim[v] = n
				changed = true
			}
			// Other args might hold pointers to autos.
			args = args[1:]
		}

		// The code below assumes that we have handled all the ops
		// with sym effects already. Sanity check that here.
		// Ignore Args since they can't be autos.
		if v.Op.SymEffect() != SymNone && v.Op != OpArg {
			panic("unhandled op with sym effect")
		}

		if (v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects()) || len(args) == 0 {
			// We need to keep nil checks even if they have no use.
			// Also keep calls and values that have side effects.
			return
		}

		// If the address of the auto reaches a memory or control
		// operation not covered above then we probably need to keep it.
		// We also need to keep autos if they reach Phis (issue #26153).
		if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
			for _, a := range args {
				if n, ok := addr[a]; ok {
					if !used.Has(n) {
						used.Add(n)
						changed = true
					}
				}
			}
			return
		}

		// Propagate any auto addresses through v.
		var node *ir.Name
		for _, a := range args {
			if n, ok := addr[a]; ok && !used.Has(n) {
				if node == nil {
					node = n
				} else if node != n {
					// Most of the time we only see one pointer
					// reaching an op, but some ops can take
					// multiple pointers (e.g. NeqPtr, Phi etc.).
					// This is rare, so just propagate the first
					// value to keep things simple.
					used.Add(n)
					changed = true
				}
			}
		}
		if node == nil {
			return
		}
		if addr[v] == nil {
			// The address of an auto reaches this op.
			addr[v] = node
			changed = true
			return
		}
		if addr[v] != node {
			// This doesn't happen in practice, but catch it just in case.
			used.Add(node)
			changed = true
		}
		return
	}

	iterations := 0
	for {
		if iterations == 4 {
			// give up
			return
		}
		iterations++
		changed := false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				changed = visit(v) || changed
			}
			// keep the auto if its address reaches a control value
			for _, c := range b.ControlValues() {
				if n, ok := addr[c]; ok && !used.Has(n) {
					used.Add(n)
					changed = true
				}
			}
		}
		if !changed {
			break
		}
	}

	// Eliminate stores to unread autos.
	for v, n := range elim {
		if used.Has(n) {
			continue
		}
		// replace with OpCopy
		v.SetArgs1(v.MemoryArg())
		v.Aux = nil
		v.AuxInt = 0
		v.Op = OpCopy
	}
}

// elimUnreadAutos deletes stores (and the associated bookkeeping op VarDef)
// to autos that are never read from.
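//
// For example, if every op whose Aux names an auto x falls into the
// SymNone/SymWrite cases below (schematically, a VarDef plus a lowered
// store to x), then x is never read, and each collected op is rewritten
// to a Copy of its memory argument.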
func elimUnreadAutos(f *Func) {
	// Loop over all ops that affect autos, taking note of which
	// autos we need and also of stores that we might be able to
	// eliminate.
	var seen ir.NameSet
	var stores []*Value
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			n, ok := v.Aux.(*ir.Name)
			if !ok {
				continue
			}
			if n.Class != ir.PAUTO {
				continue
			}

			effect := v.Op.SymEffect()
			switch effect {
			case SymNone, SymWrite:
				// If we haven't seen the auto yet
				// then this might be a store we can
				// eliminate.
				if !seen.Has(n) {
					stores = append(stores, v)
				}
			default:
				// Assume the auto is needed (loaded,
				// has its address taken, etc.).
				// Note we have to check the uses
				// because dead loads haven't been
				// eliminated yet.
				if v.Uses > 0 {
					seen.Add(n)
				}
			}
		}
	}

	// Eliminate stores to unread autos.
	for _, store := range stores {
		n, _ := store.Aux.(*ir.Name)
		if seen.Has(n) {
			continue
		}

		// replace store with OpCopy
		store.SetArgs1(store.MemoryArg())
		store.Aux = nil
		store.AuxInt = 0
		store.Op = OpCopy
	}
}