github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/ssa/deadstore.go (about)

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package ssa
     6  
     7  import (
     8  	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
     9  	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
    10  )
    11  
// dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by
// another store to the same location, with no intervening load.
// This implementation only works within a basic block. TODO: use something more global.
func dse(f *Func) {
	var stores []*Value // scratch: all memory-producing values in the current block
	loadUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(loadUse)
	storeUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(storeUse)
	// shadowed maps an address value's ID to the size (in bytes) of the
	// region at that address that is known to be overwritten later in the
	// block's store chain (see the walk below).
	shadowed := f.newSparseMap(f.NumValues())
	defer f.retSparseMap(shadowed)
	for _, b := range f.Blocks {
		// Find all the stores in this block. Categorize their uses:
		//  loadUse contains stores which are used by a subsequent load.
		//  storeUse contains stores which are used by a subsequent store.
		loadUse.clear()
		storeUse.clear()
		stores = stores[:0]
		for _, v := range b.Values {
			if v.Op == OpPhi {
				// Ignore phis - they will always be first and can't be eliminated
				continue
			}
			if v.Type.IsMemory() {
				stores = append(stores, v)
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						storeUse.add(a.ID)
						if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef {
							// CALL, DUFFCOPY, etc. are both
							// reads and writes.
							loadUse.add(a.ID)
						}
					}
				}
			} else {
				// Non-memory value: any memory arg it takes is a read
				// of that memory state.
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						loadUse.add(a.ID)
					}
				}
			}
		}
		if len(stores) == 0 {
			continue
		}

		// find last store in the block
		// The last store is the unique one whose memory result is not
		// consumed by another store in this block; the backward walk
		// below starts from it.
		var last *Value
		for _, v := range stores {
			if storeUse.contains(v.ID) {
				continue
			}
			if last != nil {
				b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString())
			}
			last = v
		}
		if last == nil {
			b.Fatalf("no last store found - cycle?")
		}

		// Walk backwards looking for dead stores. Keep track of shadowed addresses.
		// A "shadowed address" is a pointer and a size describing a memory region that
		// is known to be written. We keep track of shadowed addresses in the shadowed
		// map, mapping the ID of the address to the size of the shadowed region.
		// Since we're walking backwards, writes to a shadowed region are useless,
		// as they will be immediately overwritten.
		shadowed.clear()
		v := last

	walkloop:
		if loadUse.contains(v.ID) {
			// Someone might be reading this memory state.
			// Clear all shadowed addresses.
			shadowed.clear()
		}
		if v.Op == OpStore || v.Op == OpZero {
			// Compute the size written by this store/zero.
			var sz int64
			if v.Op == OpStore {
				sz = v.Aux.(*types.Type).Size()
			} else { // OpZero
				sz = v.AuxInt
			}
			// shadowed.get returns -1 when the address is not in the map.
			if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
				// Modify the store/zero into a copy of the memory state,
				// effectively eliding the store operation.
				if v.Op == OpStore {
					// store addr value mem
					v.SetArgs1(v.Args[2])
				} else {
					// zero addr mem
					v.SetArgs1(v.Args[1])
				}
				v.Aux = nil
				v.AuxInt = 0
				v.Op = OpCopy
			} else {
				if sz > 0x7fffffff { // work around sparseMap's int32 value type
					sz = 0x7fffffff
				}
				shadowed.set(v.Args[0].ID, int32(sz))
			}
		}
		// walk to previous store
		if v.Op == OpPhi {
			// At start of block.  Move on to next block.
			// The memory phi, if it exists, is always
			// the first logical store in the block.
			// (Even if it isn't the first in the current b.Values order.)
			continue // continues the enclosing `for _, b := range f.Blocks` loop
		}
		for _, a := range v.Args {
			if a.Block == b && a.Type.IsMemory() {
				v = a
				goto walkloop
			}
		}
	}
}
   133  
// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
// we track the operations that the address of each auto reaches and if it only
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
func elimDeadAutosGeneric(f *Func) {
	addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
	elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
	var used ir.NameSet               // used autos that must be kept

	// visit the value and report whether any of the maps are updated
	visit := func(v *Value) (changed bool) {
		args := v.Args
		switch v.Op {
		case OpAddr, OpLocalAddr:
			// Propagate the address if it points to an auto.
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if addr[v] == nil {
				addr[v] = n
				changed = true
			}
			return
		case OpVarDef:
			// v should be eliminated if we eliminate the auto.
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if elim[v] == nil {
				elim[v] = n
				changed = true
			}
			return
		case OpVarLive:
			// Don't delete the auto if it needs to be kept alive.

			// We depend on this check to keep the autotmp stack slots
			// for open-coded defers from being removed (since they
			// may not be used by the inline code, but will be used by
			// panic processing).
			n, ok := v.Aux.(*ir.Name)
			if !ok || n.Class != ir.PAUTO {
				return
			}
			if !used.Has(n) {
				used.Add(n)
				changed = true
			}
			return
		case OpStore, OpMove, OpZero:
			// v should be eliminated if we eliminate the auto.
			// args[0] is the destination address being written.
			n, ok := addr[args[0]]
			if ok && elim[v] == nil {
				elim[v] = n
				changed = true
			}
			// Other args might hold pointers to autos.
			args = args[1:]
		}

		// The code below assumes that we have handled all the ops
		// with sym effects already. Sanity check that here.
		// Ignore Args since they can't be autos.
		if v.Op.SymEffect() != SymNone && v.Op != OpArg {
			panic("unhandled op with sym effect")
		}

		// Note: && binds tighter than ||, so this stops propagating when
		// (v is dead AND removable) OR (no args remain that could carry
		// an auto's address).
		if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
			// Nil check has no use, but we need to keep it.
			// Also keep calls and values that have side effects.
			return
		}

		// If the address of the auto reaches a memory or control
		// operation not covered above then we probably need to keep it.
		// We also need to keep autos if they reach Phis (issue #26153).
		if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
			for _, a := range args {
				if n, ok := addr[a]; ok {
					if !used.Has(n) {
						used.Add(n)
						changed = true
					}
				}
			}
			return
		}

		// Propagate any auto addresses through v.
		var node *ir.Name
		for _, a := range args {
			if n, ok := addr[a]; ok && !used.Has(n) {
				if node == nil {
					node = n
				} else if node != n {
					// Most of the time we only see one pointer
					// reaching an op, but some ops can take
					// multiple pointers (e.g. NeqPtr, Phi etc.).
					// This is rare, so just propagate the first
					// value to keep things simple.
					used.Add(n)
					changed = true
				}
			}
		}
		if node == nil {
			return
		}
		if addr[v] == nil {
			// The address of an auto reaches this op.
			addr[v] = node
			changed = true
			return
		}
		if addr[v] != node {
			// This doesn't happen in practice, but catch it just in case.
			used.Add(node)
			changed = true
		}
		return
	}

	// Iterate visit to a fixed point, capped at 4 passes to bound
	// compile time on pathological inputs.
	iterations := 0
	for {
		if iterations == 4 {
			// give up
			return
		}
		iterations++
		changed := false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				// visit(v) is the left operand of ||, so it runs for
				// every value — no short-circuit skipping.
				changed = visit(v) || changed
			}
			// keep the auto if its address reaches a control value
			for _, c := range b.ControlValues() {
				if n, ok := addr[c]; ok && !used.Has(n) {
					used.Add(n)
					changed = true
				}
			}
		}
		if !changed {
			break
		}
	}

	// Eliminate stores to unread autos.
	for v, n := range elim {
		if used.Has(n) {
			continue
		}
		// replace with OpCopy
		v.SetArgs1(v.MemoryArg())
		v.Aux = nil
		v.AuxInt = 0
		v.Op = OpCopy
	}
}
   295  
   296  // elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill)
   297  // to autos that are never read from.
   298  func elimUnreadAutos(f *Func) {
   299  	// Loop over all ops that affect autos taking note of which
   300  	// autos we need and also stores that we might be able to
   301  	// eliminate.
   302  	var seen ir.NameSet
   303  	var stores []*Value
   304  	for _, b := range f.Blocks {
   305  		for _, v := range b.Values {
   306  			n, ok := v.Aux.(*ir.Name)
   307  			if !ok {
   308  				continue
   309  			}
   310  			if n.Class != ir.PAUTO {
   311  				continue
   312  			}
   313  
   314  			effect := v.Op.SymEffect()
   315  			switch effect {
   316  			case SymNone, SymWrite:
   317  				// If we haven't seen the auto yet
   318  				// then this might be a store we can
   319  				// eliminate.
   320  				if !seen.Has(n) {
   321  					stores = append(stores, v)
   322  				}
   323  			default:
   324  				// Assume the auto is needed (loaded,
   325  				// has its address taken, etc.).
   326  				// Note we have to check the uses
   327  				// because dead loads haven't been
   328  				// eliminated yet.
   329  				if v.Uses > 0 {
   330  					seen.Add(n)
   331  				}
   332  			}
   333  		}
   334  	}
   335  
   336  	// Eliminate stores to unread autos.
   337  	for _, store := range stores {
   338  		n, _ := store.Aux.(*ir.Name)
   339  		if seen.Has(n) {
   340  			continue
   341  		}
   342  
   343  		// replace store with OpCopy
   344  		store.SetArgs1(store.MemoryArg())
   345  		store.Aux = nil
   346  		store.AuxInt = 0
   347  		store.Op = OpCopy
   348  	}
   349  }