github.com/sbinet/go@v0.0.0-20160827155028-54d7de7dd62b/src/cmd/compile/internal/ssa/regalloc.go

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Register allocation.
     6  //
     7  // We use a version of a linear scan register allocator. We treat the
     8  // whole function as a single long basic block and run through
     9  // it using a greedy register allocator. Then all merge edges
    10  // (those targeting a block with len(Preds)>1) are processed to
    11  // shuffle data into the place that the target of the edge expects.
    12  //
    13  // The greedy allocator moves values into registers just before they
    14  // are used, spills registers only when necessary, and spills the
    15  // value whose next use is farthest in the future.
    16  //
    17  // The register allocator requires that a block is not scheduled until
    18  // at least one of its predecessors has been scheduled. The most recent
    19  // such predecessor provides the starting register state for a block.
    20  //
    21  // It also requires that there are no critical edges (critical =
    22  // comes from a block with >1 successor and goes to a block with >1
    23  // predecessor).  This makes it easy to add fixup code on merge edges -
    24  // the source of a merge edge has only one successor, so we can add
    25  // fixup code to the end of that block.
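        // For example (an illustrative sketch; block and register names are
        // made up, not taken from real output):
        //     b1: y = ... : AX         b2: z = ... : BX
        //         goto b3                  goto b3
        //     b3: x = phi(y, z) : CX
        // Because b1 and b2 each have exactly one successor, the merge-edge
        // fixup can simply append an AX->CX copy to the end of b1 and a
        // BX->CX copy to the end of b2.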
    26  
    27  // Spilling
    28  //
    29  // For every value, we generate a spill immediately after the value itself.
    30  //     x = Op y z    : AX
    31  //     x2 = StoreReg x
    32  // While AX still holds x, any uses of x will use that value. When AX is needed
    33  // for another value, we simply reuse AX.  Spill code has already been generated
    34  // so there is no code generated at "spill" time. When x is referenced
    35  // subsequently, we issue a load to restore x to a register using x2 as
    36  // its argument:
    37  //    x3 = LoadReg x2 : CX
    38  // x3 can then be used wherever x is referenced again.
    39  // If the spill (x2) is never used, it will be removed at the end of regalloc.
    40  //
    41  // Phi values are special, as always. We define two kinds of phis, those
    42  // where the merge happens in a register (a "register" phi) and those where
    43  // the merge happens in a stack location (a "stack" phi).
    44  //
    45  // A register phi must have the phi and all of its inputs allocated to the
    46  // same register. Register phis are spilled similarly to regular ops:
    47  //     b1: y = ... : AX        b2: z = ... : AX
    48  //         goto b3                 goto b3
    49  //     b3: x = phi(y, z) : AX
    50  //         x2 = StoreReg x
    51  //
    52  // A stack phi must have the phi and all of its inputs allocated to the same
    53  // stack location. Stack phis start out life already spilled - each phi
    54  // input must be a store (using StoreReg) at the end of the corresponding
    55  // predecessor block.
    56  //     b1: y = ... : AX        b2: z = ... : BX
    57  //         y2 = StoreReg y         z2 = StoreReg z
    58  //         goto b3                 goto b3
    59  //     b3: x = phi(y2, z2)
    60  // The stack allocator knows that StoreReg args of stack-allocated phis
    61  // must be allocated to the same stack slot as the phi that uses them.
    62  // x is now a spilled value and a restore must appear before its first use.
    63  
    64  // TODO
    65  
    66  // Use an affinity graph to mark two values which should use the
    67  // same register. This affinity graph will be used to prefer certain
    68  // registers for allocation. This affinity helps eliminate moves that
    69  // are required for phi implementations and helps generate allocations
    70  // for 2-register architectures.
    71  
    72  // Note: regalloc generates a not-quite-SSA output. If we have:
    73  //
    74  //             b1: x = ... : AX
    75  //                 x2 = StoreReg x
    76  //                 ... AX gets reused for something else ...
    77  //                 if ... goto b3 else b4
    78  //
    79  //   b3: x3 = LoadReg x2 : BX       b4: x4 = LoadReg x2 : CX
    80  //       ... use x3 ...                 ... use x4 ...
    81  //
    82  //             b2: ... use x3 ...
    83  //
    84  // If b3 is the primary predecessor of b2, then we use x3 in b2 and
    85  // add an x4:CX->BX copy at the end of b4.
    86  // But the definition of x3 doesn't dominate b2.  We should really
    87  // insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
    88  // SSA form. For now, we ignore this problem as remaining in strict
    89  // SSA form isn't needed after regalloc. We'll just leave the use
    90  // of x3 not dominated by the definition of x3, and the CX->BX copy
    91  // will have no use (so don't run deadcode after regalloc!).
    92  // TODO: maybe we should introduce these extra phis?
    93  
    94  // Additional not-quite-SSA output occurs when spills are sunk out
    95  // of loops to the targets of exit edges from the loop.  Before sinking,
    96  // there is one spill site (one StoreReg) targeting stack slot X, after
    97  // sinking there may be multiple spill sites targeting stack slot X,
    98  // with no phi functions at any join points reachable by the multiple
    99  // spill sites.  In addition, uses of the spill from copies of the original
   100  // will not name the copy in their reference; instead they will name
   101  // the original, though both will have the same spill location.  The
   102  // first sunk spill will be the original, but moved, to an exit block,
   103  // thus ensuring that there is a definition somewhere corresponding to
   104  // the original spill's uses.
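        // An illustrative sketch of the shape this produces (value names and
        // blocks are made up):
        //     loop body:  x2 = StoreReg x        // the one original spill
        // after sinking to exit blocks e1 and e2:
        //     e1: x2 = StoreReg x          e2: x5 = StoreReg x
        // x2 (the moved original) and x5 share the same stack slot, no phi
        // joins them downstream, and uses of the spill keep naming x2 even
        // on paths that flow through e2.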
   105  
   106  package ssa
   107  
   108  import (
   109  	"fmt"
   110  	"unsafe"
   111  )
   112  
   113  const (
   114  	moveSpills = iota
   115  	logSpills
   116  	regDebug
   117  	stackDebug
   118  )
   119  
   120  // distance is a measure of how far into the future values are used.
   121  // distance is measured in units of instructions.
   122  const (
   123  	likelyDistance   = 1
   124  	normalDistance   = 10
   125  	unlikelyDistance = 100
   126  )
   127  
   128  // regalloc performs register allocation on f. It sets f.RegAlloc
   129  // to the resulting allocation.
   130  func regalloc(f *Func) {
   131  	var s regAllocState
   132  	s.init(f)
   133  	s.regalloc(f)
   134  }
   135  
   136  type register uint8
   137  
   138  const noRegister register = 255
   139  
   140  type regMask uint64
   141  
   142  func (m regMask) String() string {
   143  	s := ""
   144  	for r := register(0); m != 0; r++ {
   145  		if m>>r&1 == 0 {
   146  			continue
   147  		}
   148  		m &^= regMask(1) << r
   149  		if s != "" {
   150  			s += " "
   151  		}
   152  		s += fmt.Sprintf("r%d", r)
   153  	}
   154  	return s
   155  }
   156  
   157  // countRegs returns the number of set bits in the register mask.
   158  func countRegs(r regMask) int {
   159  	n := 0
   160  	for r != 0 {
   161  		n += int(r & 1)
   162  		r >>= 1
   163  	}
   164  	return n
   165  }
   166  
   167  // pickReg picks an arbitrary register from the register mask.
   168  func pickReg(r regMask) register {
   169  	// pick the lowest one
   170  	if r == 0 {
   171  		panic("can't pick a register from an empty set")
   172  	}
   173  	for i := register(0); ; i++ {
   174  		if r&1 != 0 {
   175  			return i
   176  		}
   177  		r >>= 1
   178  	}
   179  }
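        // For example (illustrative), a mask describing the register set {r0, r3}:
        //     m := regMask(1)<<0 | regMask(1)<<3
        //     m.String()   // "r0 r3"
        //     countRegs(m) // 2
        //     pickReg(m)   // 0 (the lowest set register)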
   180  
   181  type use struct {
   182  	dist int32 // distance from start of the block to a use of a value
   183  	next *use  // linked list of uses of a value in nondecreasing dist order
   184  }
   185  
   186  type valState struct {
   187  	regs              regMask // the set of registers holding a Value (usually just one)
   188  	uses              *use    // list of uses in this block
   189  	spill             *Value  // spilled copy of the Value
   190  	spillUsed         bool
   191  	spillUsedShuffle  bool // true if used in shuffling, after ordinary uses
   192  	needReg           bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple()
   193  	rematerializeable bool // cached value of v.rematerializeable()
   194  }
   195  
   196  type regState struct {
   197  	v *Value // Original (preregalloc) Value stored in this register.
   198  	c *Value // A Value equal to v which is currently in a register.  Might be v or a copy of it.
   199  	// If a register is unused, v==c==nil
   200  }
   201  
   202  type regAllocState struct {
   203  	f *Func
   204  
   205  	registers   []Register
   206  	numRegs     register
   207  	SPReg       register
   208  	SBReg       register
   209  	GReg        register
   210  	allocatable regMask
   211  
   212  	// for each block, its primary predecessor.
   213  	// A predecessor of b is primary if it is the closest
   214  	// predecessor that appears before b in the layout order.
   215  	// We record the index in the Preds list where the primary predecessor sits.
   216  	primary []int32
   217  
   218  	// live values at the end of each block.  live[b.ID] is a list of value IDs
   219  	// which are live at the end of b, together with a count of how many instructions
   220  	// forward to the next use.
   221  	live [][]liveInfo
   222  	// desired register assignments at the end of each block.
   223  	// Note that this is a static map computed before allocation occurs. Dynamic
   224  	// register desires (from partially completed allocations) will trump
   225  	// this information.
   226  	desired []desiredState
   227  
   228  	// current state of each (preregalloc) Value
   229  	values []valState
   230  
   231  	// For each Value, map from its value ID back to the
   232  	// preregalloc Value it was derived from.
   233  	orig []*Value
   234  
   235  	// current state of each register
   236  	regs []regState
   237  
   238  	// registers that contain values which can't be kicked out
   239  	nospill regMask
   240  
   241  	// mask of registers currently in use
   242  	used regMask
   243  
   244  	// current block we're working on
   245  	curBlock *Block
   246  
   247  	// cache of use records
   248  	freeUseRecords *use
   249  
   250  	// endRegs[blockid] is the register state at the end of each block,
   251  	// encoded as a set of endReg records.
   252  	endRegs [][]endReg
   253  
   254  	// startRegs[blockid] is the register state at the start of merge blocks.
   255  	// saved state does not include the state of phi ops in the block.
   256  	startRegs [][]startReg
   257  
   258  	// spillLive[blockid] is the set of live spills at the end of each block
   259  	spillLive [][]ID
   260  
   261  	loopnest *loopnest
   262  }
   263  
   264  type spillToSink struct {
   265  	spill *Value // Spill instruction to move (a StoreReg)
   266  	dests int32  // Bitmask indicating exit blocks from loop in which spill/val is defined. 1<<i set means val is live into loop.exitBlocks[i]
   267  }
   268  
   269  func (sts *spillToSink) spilledValue() *Value {
   270  	return sts.spill.Args[0]
   271  }
   272  
   273  type endReg struct {
   274  	r register
   275  	v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
   276  	c *Value // cached version of the value
   277  }
   278  
   279  type startReg struct {
   280  	r   register
   281  	vid ID // pre-regalloc value needed in this register
   282  }
   283  
   284  // freeReg frees up register r. Any current user of r is kicked out.
   285  func (s *regAllocState) freeReg(r register) {
   286  	v := s.regs[r].v
   287  	if v == nil {
   288  		s.f.Fatalf("tried to free an already free register %d\n", r)
   289  	}
   290  
   291  	// Mark r as unused.
   292  	if s.f.pass.debug > regDebug {
   293  		fmt.Printf("freeReg %s (dump %s/%s)\n", s.registers[r].Name(), v, s.regs[r].c)
   294  	}
   295  	s.regs[r] = regState{}
   296  	s.values[v.ID].regs &^= regMask(1) << r
   297  	s.used &^= regMask(1) << r
   298  }
   299  
   300  // freeRegs frees up all registers listed in m.
   301  func (s *regAllocState) freeRegs(m regMask) {
   302  	for m&s.used != 0 {
   303  		s.freeReg(pickReg(m & s.used))
   304  	}
   305  }
   306  
   307  // setOrig records that c's original value is the same as
   308  // v's original value.
   309  func (s *regAllocState) setOrig(c *Value, v *Value) {
   310  	for int(c.ID) >= len(s.orig) {
   311  		s.orig = append(s.orig, nil)
   312  	}
   313  	if s.orig[c.ID] != nil {
   314  		s.f.Fatalf("orig value set twice %s %s", c, v)
   315  	}
   316  	s.orig[c.ID] = s.orig[v.ID]
   317  }
   318  
   319  // assignReg assigns register r to hold c, a copy of v.
   320  // r must be unused.
   321  func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
   322  	if s.f.pass.debug > regDebug {
   323  		fmt.Printf("assignReg %s %s/%s\n", s.registers[r].Name(), v, c)
   324  	}
   325  	if s.regs[r].v != nil {
   326  		s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v)
   327  	}
   328  
   329  	// Update state.
   330  	s.regs[r] = regState{v, c}
   331  	s.values[v.ID].regs |= regMask(1) << r
   332  	s.used |= regMask(1) << r
   333  	s.f.setHome(c, &s.registers[r])
   334  }
   335  
   336  // allocReg chooses a register from the set of registers in mask.
   337  // If there is no unused register, a Value will be kicked out of
   338  // a register to make room.
   339  func (s *regAllocState) allocReg(mask regMask, v *Value) register {
   340  	mask &= s.allocatable
   341  	mask &^= s.nospill
   342  	if mask == 0 {
   343  		s.f.Fatalf("no register available for %s", v)
   344  	}
   345  
   346  	// Pick an unused register if one is available.
   347  	if mask&^s.used != 0 {
   348  		return pickReg(mask &^ s.used)
   349  	}
   350  
   351  	// Pick a value to spill. Spill the value with the
   352  	// farthest-in-the-future use.
   353  	// TODO: Prefer registers with already spilled Values?
   354  	// TODO: Modify preference using affinity graph.
   355  	// TODO: if a single value is in multiple registers, spill one of them
   356  	// before spilling a value in just a single register.
   357  
   358  	// Find a register to spill. We spill the register containing the value
   359  	// whose next use is as far in the future as possible.
   360  	// https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
   361  	var r register
   362  	maxuse := int32(-1)
   363  	for t := register(0); t < s.numRegs; t++ {
   364  		if mask>>t&1 == 0 {
   365  			continue
   366  		}
   367  		v := s.regs[t].v
   368  		if n := s.values[v.ID].uses.dist; n > maxuse {
   369  			// v's next use is farther in the future than any value
   370  			// we've seen so far. A new best spill candidate.
   371  			r = t
   372  			maxuse = n
   373  		}
   374  	}
   375  	if maxuse == -1 {
   376  		s.f.Unimplementedf("couldn't find register to spill")
   377  	}
   378  	s.freeReg(r)
   379  	return r
   380  }
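        // Example of the eviction heuristic above (register names are
        // illustrative): if AX holds a value whose next use is 2 instructions
        // away and BX holds one whose next use is 50 instructions away,
        // allocReg evicts BX. The choice just maximizes uses.dist over the
        // registers in the mask, matching the "farthest in the future" policy
        // described at the top of this file.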
   381  
   382  // allocValToReg allocates v to a register selected from regMask and
   383  // returns the register copy of v. Any previous user is kicked out and spilled
   384  // (if necessary). Load code is added at the current pc. If nospill is set the
   385  // allocated register is marked nospill so the assignment cannot be
   386  // undone until the caller allows it by clearing nospill. Returns a
   387  // *Value which is either v or a copy of v allocated to the chosen register.
   388  func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, line int32) *Value {
   389  	vi := &s.values[v.ID]
   390  
   391  	// Check if v is already in a requested register.
   392  	if mask&vi.regs != 0 {
   393  		r := pickReg(mask & vi.regs)
   394  		if s.regs[r].v != v || s.regs[r].c == nil {
   395  			panic("bad register state")
   396  		}
   397  		if nospill {
   398  			s.nospill |= regMask(1) << r
   399  		}
   400  		return s.regs[r].c
   401  	}
   402  
   403  	// Allocate a register.
   404  	r := s.allocReg(mask, v)
   405  
   406  	// Allocate v to the new register.
   407  	var c *Value
   408  	if vi.regs != 0 {
   409  		// Copy from a register that v is already in.
   410  		r2 := pickReg(vi.regs)
   411  		if s.regs[r2].v != v {
   412  			panic("bad register state")
   413  		}
   414  		c = s.curBlock.NewValue1(line, OpCopy, v.Type, s.regs[r2].c)
   415  	} else if v.rematerializeable() {
   416  		// Rematerialize instead of loading from the spill location.
   417  		c = v.copyInto(s.curBlock)
   418  	} else {
   419  		switch {
   420  		// Load v from its spill location.
   421  		case vi.spill != nil:
   422  			if s.f.pass.debug > logSpills {
   423  				s.f.Config.Warnl(vi.spill.Line, "load spill for %v from %v", v, vi.spill)
   424  			}
   425  			c = s.curBlock.NewValue1(line, OpLoadReg, v.Type, vi.spill)
   426  			vi.spillUsed = true
   427  		default:
   428  			s.f.Fatalf("attempt to load unspilled value %v", v.LongString())
   429  		}
   430  	}
   431  	s.setOrig(c, v)
   432  	s.assignReg(r, v, c)
   433  	if nospill {
   434  		s.nospill |= regMask(1) << r
   435  	}
   436  	return c
   437  }
   438  
   439  func (s *regAllocState) init(f *Func) {
   440  	s.f = f
   441  	s.registers = f.Config.registers
   442  	if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
   443  		s.f.Fatalf("bad number of registers: %d", nr)
   444  	} else {
   445  		s.numRegs = register(nr)
   446  	}
   447  	// Locate SP, SB, and g registers.
   448  	s.SPReg = noRegister
   449  	s.SBReg = noRegister
   450  	s.GReg = noRegister
   451  	for r := register(0); r < s.numRegs; r++ {
   452  		switch s.registers[r].Name() {
   453  		case "SP":
   454  			s.SPReg = r
   455  		case "SB":
   456  			s.SBReg = r
   457  		case "g":
   458  			s.GReg = r
   459  		}
   460  	}
   461  	// Make sure we found all required registers.
   462  	switch noRegister {
   463  	case s.SPReg:
   464  		s.f.Fatalf("no SP register found")
   465  	case s.SBReg:
   466  		s.f.Fatalf("no SB register found")
   467  	case s.GReg:
   468  		if f.Config.hasGReg {
   469  			s.f.Fatalf("no g register found")
   470  		}
   471  	}
   472  
   473  	// Figure out which registers we're allowed to use.
   474  	s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
   475  	s.allocatable &^= 1 << s.SPReg
   476  	s.allocatable &^= 1 << s.SBReg
   477  	if s.f.Config.hasGReg {
   478  		s.allocatable &^= 1 << s.GReg
   479  	}
   480  	if s.f.Config.ctxt.Framepointer_enabled && s.f.Config.FPReg >= 0 {
   481  		s.allocatable &^= 1 << uint(s.f.Config.FPReg)
   482  	}
   483  	if s.f.Config.ctxt.Flag_shared {
   484  		switch s.f.Config.arch {
   485  		case "ppc64le": // R2 already reserved.
   486  			s.allocatable &^= 1 << 11 // R12 -- R0 is skipped in PPC64Ops.go
   487  		}
   488  	}
   489  	if s.f.Config.ctxt.Flag_dynlink {
   490  		switch s.f.Config.arch {
   491  		case "amd64":
   492  			s.allocatable &^= 1 << 15 // R15
   493  		case "arm":
   494  			s.allocatable &^= 1 << 9 // R9
   495  		case "ppc64le": // R2 already reserved.
   496  			s.allocatable &^= 1 << 11 // R12 -- R0 is skipped in PPC64Ops.go
   497  		case "arm64":
   498  			// nothing to do?
   499  		case "386":
   500  			// nothing to do.
   501  			// Note that for Flag_shared (position independent code)
   502  			// we do need to be careful, but that carefulness is hidden
   503  			// in the rewrite rules so we always have a free register
   504  			// available for global load/stores. See gen/386.rules (search for Flag_shared).
   505  		default:
   506  			s.f.Config.fe.Unimplementedf(0, "arch %s not implemented", s.f.Config.arch)
   507  		}
   508  	}
   509  	if s.f.Config.nacl {
   510  		switch s.f.Config.arch {
   511  		case "arm":
   512  			s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
   513  		case "amd64p32":
   514  			s.allocatable &^= 1 << 5  // BP - reserved for nacl
   515  			s.allocatable &^= 1 << 15 // R15 - reserved for nacl
   516  		}
   517  	}
   518  	if s.f.Config.use387 {
   519  		s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
   520  	}
   521  
   522  	s.regs = make([]regState, s.numRegs)
   523  	s.values = make([]valState, f.NumValues())
   524  	s.orig = make([]*Value, f.NumValues())
   525  	for _, b := range f.Blocks {
   526  		for _, v := range b.Values {
   527  			if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
   528  				s.values[v.ID].needReg = true
   529  				s.values[v.ID].rematerializeable = v.rematerializeable()
   530  				s.orig[v.ID] = v
   531  			}
   532  			// Note: needReg is false for values returning Tuple types.
   533  			// Instead, we mark the corresponding Selects as needReg.
   534  		}
   535  	}
   536  	s.computeLive()
   537  
   538  	// Compute block order. This array allows us to distinguish forward edges
   539  	// from backward edges and compute how far they go.
   540  	blockOrder := make([]int32, f.NumBlocks())
   541  	for i, b := range f.Blocks {
   542  		blockOrder[b.ID] = int32(i)
   543  	}
   544  
   545  	// Compute primary predecessors.
   546  	s.primary = make([]int32, f.NumBlocks())
   547  	for _, b := range f.Blocks {
   548  		best := -1
   549  		for i, e := range b.Preds {
   550  			p := e.b
   551  			if blockOrder[p.ID] >= blockOrder[b.ID] {
   552  				continue // backward edge
   553  			}
   554  			if best == -1 || blockOrder[p.ID] > blockOrder[b.Preds[best].b.ID] {
   555  				best = i
   556  			}
   557  		}
   558  		s.primary[b.ID] = int32(best)
   559  	}
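        	// For example (illustrative): if b has predecessors p1 and p2,
        	// p1 laid out before b and p2 laid out after b (a backedge), then
        	// p1 is primary. If both precede b, the one latest in the layout
        	// order (closest to b) wins; if neither does, primary[b.ID] is -1.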
   560  
   561  	s.endRegs = make([][]endReg, f.NumBlocks())
   562  	s.startRegs = make([][]startReg, f.NumBlocks())
   563  	s.spillLive = make([][]ID, f.NumBlocks())
   564  }
   565  
   566  // Adds a use record for id at distance dist from the start of the block.
   567  // All calls to addUse must happen with nonincreasing dist.
   568  func (s *regAllocState) addUse(id ID, dist int32) {
   569  	r := s.freeUseRecords
   570  	if r != nil {
   571  		s.freeUseRecords = r.next
   572  	} else {
   573  		r = &use{}
   574  	}
   575  	r.dist = dist
   576  	r.next = s.values[id].uses
   577  	s.values[id].uses = r
   578  	if r.next != nil && dist > r.next.dist {
   579  		s.f.Fatalf("uses added in wrong order")
   580  	}
   581  }
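        // For example (hypothetical distances): walking a block backwards and
        // calling addUse(id, 7), then addUse(id, 3), then addUse(id, 1)
        // builds the list 1 -> 3 -> 7. The head is always the nearest next
        // use, which is what allocReg compares when picking a spill victim.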
   582  
   583  // advanceUses advances the uses of v's args from the state before v to the state after v.
   584  // Any values which have no more uses are deallocated from registers.
   585  func (s *regAllocState) advanceUses(v *Value) {
   586  	for _, a := range v.Args {
   587  		if !s.values[a.ID].needReg {
   588  			continue
   589  		}
   590  		ai := &s.values[a.ID]
   591  		r := ai.uses
   592  		ai.uses = r.next
   593  		if r.next == nil {
   594  			// Value is dead, free all registers that hold it.
   595  			s.freeRegs(ai.regs)
   596  		}
   597  		r.next = s.freeUseRecords
   598  		s.freeUseRecords = r
   599  	}
   600  }
   601  
   602  // liveAfterCurrentInstruction reports whether v is live after
   603  // the current instruction is completed.  v must be used by the
   604  // current instruction.
   605  func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
   606  	u := s.values[v.ID].uses
   607  	d := u.dist
   608  	for u != nil && u.dist == d {
   609  		u = u.next
   610  	}
   611  	return u != nil && u.dist > d
   612  }
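        // For example (hypothetical), if v's use list is 5 -> 5 -> 9, the two
        // entries at distance 5 belong to the instruction currently being
        // processed (v is used twice by it) and the trailing 9 means v is
        // needed again later, so the result is true. For a list of just
        // 5 -> 5 the result is false.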
   613  
   614  // Sets the state of the registers to that encoded in regs.
   615  func (s *regAllocState) setState(regs []endReg) {
   616  	s.freeRegs(s.used)
   617  	for _, x := range regs {
   618  		s.assignReg(x.r, x.v, x.c)
   619  	}
   620  }
   621  
   622  // compatRegs returns the set of registers which can store a type t.
   623  func (s *regAllocState) compatRegs(t Type) regMask {
   624  	var m regMask
   625  	if t.IsFloat() || t == TypeInt128 {
   626  		m = s.f.Config.fpRegMask
   627  	} else {
   628  		m = s.f.Config.gpRegMask
   629  	}
   630  	return m & s.allocatable
   631  }
   632  
   633  // loopForBlock returns the loop containing block b,
   634  // provided that the loop is "interesting" for purposes
   635  // of improving register allocation (= is inner, and does
   636  // not contain a call)
   637  func (s *regAllocState) loopForBlock(b *Block) *loop {
   638  	loop := s.loopnest.b2l[b.ID]
   639  
   640  	// Minor for-the-time-being optimization: nothing happens
   641  	// unless a loop is both inner and call-free, therefore
   642  	// don't bother with other loops.
   643  	if loop != nil && (loop.containsCall || !loop.isInner) {
   644  		loop = nil
   645  	}
   646  	return loop
   647  }
   648  
   649  func (s *regAllocState) regalloc(f *Func) {
   650  	liveSet := f.newSparseSet(f.NumValues())
   651  	defer f.retSparseSet(liveSet)
   652  	var oldSched []*Value
   653  	var phis []*Value
   654  	var phiRegs []register
   655  	var args []*Value
   656  
   657  	// statistics
   658  	var nSpills int               // # of spills remaining
   659  	var nSpillsInner int          // # of spills remaining in inner loops
   660  	var nSpillsSunk int           // # of sunk spills remaining
   661  	var nSpillsChanged int        // # of sunk spills lost because of register use change
   662  	var nSpillsSunkUnused int     // # of spills not sunk because they were removed completely
   663  	var nSpillsNotSunkLateUse int // # of spills not sunk because of very late use (in shuffle)
   664  
   665  	// Data structure used for computing desired registers.
   666  	var desired desiredState
   667  
   668  	// Desired registers for inputs & outputs for each instruction in the block.
   669  	type dentry struct {
   670  		out [4]register    // desired output registers
   671  		in  [3][4]register // desired input registers (for inputs 0,1, and 2)
   672  	}
   673  	var dinfo []dentry
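        	// dinfo[i] holds the desired registers for oldSched[i]: up to four
        	// candidates for its result and for each of its first three inputs.
        	// It is filled by the backwards walk over each block below and
        	// consulted when that value's inputs and outputs get registers.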
   674  
   675  	if f.Entry != f.Blocks[0] {
   676  		f.Fatalf("entry block must be first")
   677  	}
   678  
   679  	// Get loop nest so that spills in inner loops can be
   680  	// tracked.  When the last block of a loop is processed,
   681  	// attempt to move spills out of the loop.
   682  	s.loopnest.findExits()
   683  
   684  	// Spills are moved from one block's slice of values to another's.
   685  	// This confuses register allocation if it occurs before it is
   686  	// complete, so candidates are recorded, then rechecked and
   687  	// moved after all allocation (register and stack) is complete.
   688  	// Because movement is only within a stack slot's lifetime, it
   689  	// is safe to do this.
   690  	var toSink []spillToSink
   691  	// Will be used to figure out live inputs to exit blocks of inner loops.
   692  	entryCandidates := newSparseMap(f.NumValues())
   693  
   694  	for _, b := range f.Blocks {
   695  		s.curBlock = b
   696  		loop := s.loopForBlock(b)
   697  
   698  		// Initialize liveSet and uses fields for this block.
   699  		// Walk backwards through the block doing liveness analysis.
   700  		liveSet.clear()
   701  		d := int32(len(b.Values))
   702  		if b.Kind == BlockCall || b.Kind == BlockDefer {
   703  			d += unlikelyDistance
   704  		}
   705  		for _, e := range s.live[b.ID] {
   706  			s.addUse(e.ID, d+e.dist) // pseudo-uses from beyond end of block
   707  			liveSet.add(e.ID)
   708  		}
   709  		if v := b.Control; v != nil && s.values[v.ID].needReg {
   710  			s.addUse(v.ID, int32(len(b.Values))) // pseudo-use by control value
   711  			liveSet.add(v.ID)
   712  		}
   713  		for i := len(b.Values) - 1; i >= 0; i-- {
   714  			v := b.Values[i]
   715  			liveSet.remove(v.ID)
   716  			if v.Op == OpPhi {
   717  				// Remove v from the live set, but don't add
   718  				// any inputs. This is the state the len(b.Preds)>1
   719  				// case below desires; it wants to process phis specially.
   720  				continue
   721  			}
   722  			for _, a := range v.Args {
   723  				if !s.values[a.ID].needReg {
   724  					continue
   725  				}
   726  				s.addUse(a.ID, int32(i))
   727  				liveSet.add(a.ID)
   728  			}
   729  		}
   730  		if s.f.pass.debug > regDebug {
   731  			fmt.Printf("uses for %s:%s\n", s.f.Name, b)
   732  			for i := range s.values {
   733  				vi := &s.values[i]
   734  				u := vi.uses
   735  				if u == nil {
   736  					continue
   737  				}
   738  				fmt.Printf("  v%d:", i)
   739  				for u != nil {
   740  					fmt.Printf(" %d", u.dist)
   741  					u = u.next
   742  				}
   743  				fmt.Println()
   744  			}
   745  		}
   746  
   747  		// Make a copy of the block schedule so we can generate a new one in place.
   748  		// We make a separate copy for phis and regular values.
   749  		nphi := 0
   750  		for _, v := range b.Values {
   751  			if v.Op != OpPhi {
   752  				break
   753  			}
   754  			nphi++
   755  		}
   756  		phis = append(phis[:0], b.Values[:nphi]...)
   757  		oldSched = append(oldSched[:0], b.Values[nphi:]...)
   758  		b.Values = b.Values[:0]
   759  
   760  		// Initialize start state of block.
   761  		if b == f.Entry {
   762  			// Regalloc state is empty to start.
   763  			if nphi > 0 {
   764  				f.Fatalf("phis in entry block")
   765  			}
   766  		} else if len(b.Preds) == 1 {
   767  			// Start regalloc state with the end state of the previous block.
   768  			s.setState(s.endRegs[b.Preds[0].b.ID])
   769  			if nphi > 0 {
   770  				f.Fatalf("phis in single-predecessor block")
   771  			}
   772  			// Drop any values which are no longer live.
   773  			// This may happen because at the end of p, a value may be
   774  			// live but only used by some other successor of p.
   775  			for r := register(0); r < s.numRegs; r++ {
   776  				v := s.regs[r].v
   777  				if v != nil && !liveSet.contains(v.ID) {
   778  					s.freeReg(r)
   779  				}
   780  			}
   781  		} else {
   782  			// This is the complicated case. We have more than one predecessor,
   783  			// which means we may have Phi ops.
   784  
   785  			// Copy phi ops into new schedule.
   786  			b.Values = append(b.Values, phis...)
   787  
   788  			// Start with the final register state of the primary predecessor
   789  			idx := s.primary[b.ID]
   790  			if idx < 0 {
   791  				f.Fatalf("block with no primary predecessor %s", b)
   792  			}
   793  			p := b.Preds[idx].b
   794  			s.setState(s.endRegs[p.ID])
   795  
   796  			if s.f.pass.debug > regDebug {
   797  				fmt.Printf("starting merge block %s with end state of %s:\n", b, p)
   798  				for _, x := range s.endRegs[p.ID] {
   799  					fmt.Printf("  %s: orig:%s cache:%s\n", s.registers[x.r].Name(), x.v, x.c)
   800  				}
   801  			}
   802  
   803  			// Decide on registers for phi ops. Use the registers determined
   804  			// by the primary predecessor if we can.
   805  			// TODO: pick best of (already processed) predecessors?
   806  			// Majority vote?  Deepest nesting level?
   807  			phiRegs = phiRegs[:0]
   808  			var phiUsed regMask
   809  			for _, v := range phis {
   810  				if !s.values[v.ID].needReg {
   811  					phiRegs = append(phiRegs, noRegister)
   812  					continue
   813  				}
   814  				a := v.Args[idx]
   815  				m := s.values[a.ID].regs &^ phiUsed
   816  				if m != 0 {
   817  					r := pickReg(m)
   818  					s.freeReg(r)
   819  					phiUsed |= regMask(1) << r
   820  					phiRegs = append(phiRegs, r)
   821  				} else {
   822  					phiRegs = append(phiRegs, noRegister)
   823  				}
   824  			}
   825  
   826  			// Second pass - deallocate any phi inputs which are now dead.
   827  			for _, v := range phis {
   828  				if !s.values[v.ID].needReg {
   829  					continue
   830  				}
   831  				a := v.Args[idx]
   832  				if !liveSet.contains(a.ID) {
   833  					// Input is dead beyond the phi, deallocate
   834  					// anywhere else it might live.
   835  					s.freeRegs(s.values[a.ID].regs)
   836  				}
   837  			}
   838  
   839  			// Third pass - pick registers for phis whose inputs
   840  			// were not in a register.
   841  			for i, v := range phis {
   842  				if !s.values[v.ID].needReg {
   843  					continue
   844  				}
   845  				if phiRegs[i] != noRegister {
   846  					continue
   847  				}
   848  				if s.f.Config.use387 && v.Type.IsFloat() {
   849  					continue // 387 can't handle floats in registers between blocks
   850  				}
   851  				m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
   852  				if m != 0 {
   853  					r := pickReg(m)
   854  					phiRegs[i] = r
   855  					phiUsed |= regMask(1) << r
   856  				}
   857  			}
   858  
   859  			// Set registers for phis. Add phi spill code.
   860  			for i, v := range phis {
   861  				if !s.values[v.ID].needReg {
   862  					continue
   863  				}
   864  				r := phiRegs[i]
   865  				if r == noRegister {
   866  					// stack-based phi
   867  					// Spills will be inserted in all the predecessors below.
   868  					s.values[v.ID].spill = v        // v starts life spilled
   869  					s.values[v.ID].spillUsed = true // use is guaranteed
   870  					continue
   871  				}
   872  				// register-based phi
   873  				s.assignReg(r, v, v)
   874  				// Spill the phi in case we need to restore it later.
   875  				spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
   876  				s.setOrig(spill, v)
   877  				s.values[v.ID].spill = spill
   878  				s.values[v.ID].spillUsed = false
   879  				if loop != nil {
   880  					loop.spills = append(loop.spills, v)
   881  					nSpillsInner++
   882  				}
   883  				nSpills++
   884  			}
   885  
   886  			// Save the starting state for use by merge edges.
   887  			var regList []startReg
   888  			for r := register(0); r < s.numRegs; r++ {
   889  				v := s.regs[r].v
   890  				if v == nil {
   891  					continue
   892  				}
   893  				if phiUsed>>r&1 != 0 {
   894  					// Skip registers that phis used; we'll handle those
   895  					// specially during merge edge processing.
   896  					continue
   897  				}
   898  				regList = append(regList, startReg{r, v.ID})
   899  			}
   900  			s.startRegs[b.ID] = regList
   901  
   902  			if s.f.pass.debug > regDebug {
   903  				fmt.Printf("after phis\n")
   904  				for _, x := range s.startRegs[b.ID] {
   905  					fmt.Printf("  %s: v%d\n", s.registers[x.r].Name(), x.vid)
   906  				}
   907  			}
   908  		}
   909  
   910  		// Allocate space to record the desired registers for each value.
   911  		dinfo = dinfo[:0]
   912  		for i := 0; i < len(oldSched); i++ {
   913  			dinfo = append(dinfo, dentry{})
   914  		}
   915  
   916  		// Load static desired register info at the end of the block.
   917  		desired.copy(&s.desired[b.ID])
   918  
   919  		// Check actual assigned registers at the start of the next block(s).
   920  		// Dynamically assigned registers will trump the static
   921  		// desired registers computed during liveness analysis.
   922  		// Note that we do this phase after startRegs is set above, so that
   923  		// we get the right behavior for a block which branches to itself.
   924  		for _, e := range b.Succs {
   925  			succ := e.b
   926  			// TODO: prioritize likely successor?
   927  			for _, x := range s.startRegs[succ.ID] {
   928  				desired.add(x.vid, x.r)
   929  			}
   930  			// Process phi ops in succ.
   931  			pidx := e.i
   932  			for _, v := range succ.Values {
   933  				if v.Op != OpPhi {
   934  					break
   935  				}
   936  				if !s.values[v.ID].needReg {
   937  					continue
   938  				}
   939  				rp, ok := s.f.getHome(v.ID).(*Register)
   940  				if !ok {
   941  					continue
   942  				}
   943  				desired.add(v.Args[pidx].ID, register(rp.Num))
   944  			}
   945  		}
   946  		// Walk values backwards computing desired register info.
   947  		// See computeLive for more comments.
   948  		for i := len(oldSched) - 1; i >= 0; i-- {
   949  			v := oldSched[i]
   950  			prefs := desired.remove(v.ID)
   951  			desired.clobber(opcodeTable[v.Op].reg.clobbers)
   952  			for _, j := range opcodeTable[v.Op].reg.inputs {
   953  				if countRegs(j.regs) != 1 {
   954  					continue
   955  				}
   956  				desired.clobber(j.regs)
   957  				desired.add(v.Args[j.idx].ID, pickReg(j.regs))
   958  			}
   959  			if opcodeTable[v.Op].resultInArg0 {
   960  				if opcodeTable[v.Op].commutative {
   961  					desired.addList(v.Args[1].ID, prefs)
   962  				}
   963  				desired.addList(v.Args[0].ID, prefs)
   964  			}
   965  			// Save desired registers for this value.
   966  			dinfo[i].out = prefs
   967  			for j, a := range v.Args {
   968  				if j >= len(dinfo[i].in) {
   969  					break
   970  				}
   971  				dinfo[i].in[j] = desired.get(a.ID)
   972  			}
   973  		}
   974  
   975  		// Process all the non-phi values.
   976  		for idx, v := range oldSched {
   977  			if s.f.pass.debug > regDebug {
   978  				fmt.Printf("  processing %s\n", v.LongString())
   979  			}
   980  			regspec := opcodeTable[v.Op].reg
   981  			if v.Op == OpPhi {
   982  				f.Fatalf("phi %s not at start of block", v)
   983  			}
   984  			if v.Op == OpSP {
   985  				s.assignReg(s.SPReg, v, v)
   986  				b.Values = append(b.Values, v)
   987  				s.advanceUses(v)
   988  				continue
   989  			}
   990  			if v.Op == OpSB {
   991  				s.assignReg(s.SBReg, v, v)
   992  				b.Values = append(b.Values, v)
   993  				s.advanceUses(v)
   994  				continue
   995  			}
   996  			if v.Op == OpSelect0 || v.Op == OpSelect1 {
   997  				if s.values[v.ID].needReg {
   998  					var i = 0
   999  					if v.Op == OpSelect1 {
  1000  						i = 1
  1001  					}
  1002  					s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).Num), v, v)
  1003  				}
  1004  				b.Values = append(b.Values, v)
  1005  				s.advanceUses(v)
  1006  				goto issueSpill
  1007  			}
  1008  			if v.Op == OpGetG && s.f.Config.hasGReg {
  1009  				// use hardware g register
  1010  				if s.regs[s.GReg].v != nil {
  1011  					s.freeReg(s.GReg) // kick out the old value
  1012  				}
  1013  				s.assignReg(s.GReg, v, v)
  1014  				b.Values = append(b.Values, v)
  1015  				s.advanceUses(v)
  1016  				goto issueSpill
  1017  			}
  1018  			if v.Op == OpArg {
  1019  				// Args are "pre-spilled" values. We don't allocate
  1020  				// any register here. We just set up the spill pointer to
  1021  				// point at itself and any later user will restore it to use it.
  1022  				s.values[v.ID].spill = v
  1023  				s.values[v.ID].spillUsed = true // use is guaranteed
  1024  				b.Values = append(b.Values, v)
  1025  				s.advanceUses(v)
  1026  				continue
  1027  			}
  1028  			if v.Op == OpKeepAlive {
  1029  				// Make sure the argument to v is still live here.
  1030  				s.advanceUses(v)
  1031  				vi := &s.values[v.Args[0].ID]
  1032  				if vi.spillUsed {
  1033  					// Use the spill location.
  1034  					v.SetArg(0, vi.spill)
  1035  				} else {
  1036  					// No need to keep unspilled values live.
  1037  					// These are typically rematerializeable constants like nil,
  1038  					// or values of a variable that were modified since the last call.
  1039  					v.Op = OpCopy
  1040  					v.SetArgs1(v.Args[1])
  1041  				}
  1042  				b.Values = append(b.Values, v)
  1043  				continue
  1044  			}
  1045  			if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
  1046  				// No register allocation required (or none specified yet)
  1047  				s.freeRegs(regspec.clobbers)
  1048  				b.Values = append(b.Values, v)
  1049  				s.advanceUses(v)
  1050  				continue
  1051  			}
  1052  
  1053  			if s.values[v.ID].rematerializeable {
  1054  				// Value is rematerializeable, don't issue it here.
  1055  				// It will get issued just before each use (see
  1056  				// allocValToReg).
  1057  				for _, a := range v.Args {
  1058  					a.Uses--
  1059  				}
  1060  				s.advanceUses(v)
  1061  				continue
  1062  			}
  1063  
  1064  			if s.f.pass.debug > regDebug {
  1065  				fmt.Printf("value %s\n", v.LongString())
  1066  				fmt.Printf("  out:")
  1067  				for _, r := range dinfo[idx].out {
  1068  					if r != noRegister {
  1069  						fmt.Printf(" %s", s.registers[r].Name())
  1070  					}
  1071  				}
  1072  				fmt.Println()
  1073  				for i := 0; i < len(v.Args) && i < 3; i++ {
  1074  					fmt.Printf("  in%d:", i)
  1075  					for _, r := range dinfo[idx].in[i] {
  1076  						if r != noRegister {
  1077  							fmt.Printf(" %s", s.registers[r].Name())
  1078  						}
  1079  					}
  1080  					fmt.Println()
  1081  				}
  1082  			}
  1083  
  1084  			// Move arguments to registers. Process in an ordering defined
  1085  			// by the register specification (most constrained first).
  1086  			args = append(args[:0], v.Args...)
  1087  			for _, i := range regspec.inputs {
  1088  				mask := i.regs
  1089  				if mask&s.values[args[i.idx].ID].regs == 0 {
  1090  					// Need a new register for the input.
  1091  					mask &= s.allocatable
  1092  					mask &^= s.nospill
  1093  					// Use the desired register if available.
  1094  					if i.idx < 3 {
  1095  						for _, r := range dinfo[idx].in[i.idx] {
  1096  							if r != noRegister && (mask&^s.used)>>r&1 != 0 {
  1097  								// Desired register is allowed and unused.
  1098  								mask = regMask(1) << r
  1099  								break
  1100  							}
  1101  						}
  1102  					}
  1103  					// Avoid registers we're saving for other values.
  1104  					if mask&^desired.avoid != 0 {
  1105  						mask &^= desired.avoid
  1106  					}
  1107  				}
  1108  				args[i.idx] = s.allocValToReg(args[i.idx], mask, true, v.Line)
  1109  			}
  1110  
  1111  			// If the output clobbers the input register, make sure we have
  1112  			// at least two copies of the input register so we don't
  1113  			// have to reload the value from the spill location.
  1114  			if opcodeTable[v.Op].resultInArg0 {
  1115  				var m regMask
  1116  				if !s.liveAfterCurrentInstruction(v.Args[0]) {
  1117  					// arg0 is dead.  We can clobber its register.
  1118  					goto ok
  1119  				}
  1120  				if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
  1121  					// we have at least 2 copies of arg0.  We can afford to clobber one.
  1122  					goto ok
  1123  				}
  1124  				if opcodeTable[v.Op].commutative {
  1125  					if !s.liveAfterCurrentInstruction(v.Args[1]) {
  1126  						args[0], args[1] = args[1], args[0]
  1127  						goto ok
  1128  					}
  1129  					if countRegs(s.values[v.Args[1].ID].regs) >= 2 {
  1130  						args[0], args[1] = args[1], args[0]
  1131  						goto ok
  1132  					}
  1133  				}
  1134  
  1135  				// We can't overwrite arg0 (or arg1, if commutative).  So we
  1136  				// need to make a copy of an input so we have a register we can modify.
  1137  
  1138  				// Possible new registers to copy into.
  1139  				m = s.compatRegs(v.Args[0].Type) &^ s.used
  1140  				if m == 0 {
  1141  					// No free registers.  In this case we'll just clobber
  1142  					// an input and future uses of that input must use a restore.
  1143  					// TODO(khr): We should really do this like allocReg does it,
  1144  					// spilling the value with the most distant next use.
  1145  					goto ok
  1146  				}
  1147  
  1148  				// Try to move an input to the desired output.
  1149  				for _, r := range dinfo[idx].out {
  1150  					if r != noRegister && m>>r&1 != 0 {
  1151  						m = regMask(1) << r
  1152  						args[0] = s.allocValToReg(v.Args[0], m, true, v.Line)
  1153  						// Note: we update args[0] so the instruction will
  1154  						// use the register copy we just made.
  1155  						goto ok
  1156  					}
  1157  				}
  1158  				// Try to copy input to its desired location & use its old
  1159  				// location as the result register.
  1160  				for _, r := range dinfo[idx].in[0] {
  1161  					if r != noRegister && m>>r&1 != 0 {
  1162  						m = regMask(1) << r
  1163  						s.allocValToReg(v.Args[0], m, true, v.Line)
  1164  						// Note: no update to args[0] so the instruction will
  1165  						// use the original copy.
  1166  						goto ok
  1167  					}
  1168  				}
  1169  				if opcodeTable[v.Op].commutative {
  1170  					for _, r := range dinfo[idx].in[1] {
  1171  						if r != noRegister && m>>r&1 != 0 {
  1172  							m = regMask(1) << r
  1173  							s.allocValToReg(v.Args[1], m, true, v.Line)
  1174  							args[0], args[1] = args[1], args[0]
  1175  							goto ok
  1176  						}
  1177  					}
  1178  				}
  1179  				// Avoid future fixed uses if we can.
  1180  				if m&^desired.avoid != 0 {
  1181  					m &^= desired.avoid
  1182  				}
  1183  				// Save input 0 to a new register so we can clobber it.
  1184  				s.allocValToReg(v.Args[0], m, true, v.Line)
  1185  			ok:
  1186  			}
  1187  
  1188  			// Now that all args are in regs, we're ready to issue the value itself.
  1189  			// Before we pick a register for the output value, allow input registers
  1190  			// to be deallocated. We do this here so that the output can use the
  1191  			// same register as a dying input.
  1192  			s.nospill = 0
  1193  			s.advanceUses(v) // frees any registers holding args that are no longer live
  1194  
  1195  			// Dump any registers which will be clobbered
  1196  			s.freeRegs(regspec.clobbers)
  1197  
  1198  			// Pick registers for outputs.
  1199  			{
  1200  				outRegs := [2]register{noRegister, noRegister}
  1201  				var used regMask
  1202  				for _, out := range regspec.outputs {
  1203  					mask := out.regs & s.allocatable &^ used
  1204  					if mask == 0 {
  1205  						continue
  1206  					}
  1207  					if opcodeTable[v.Op].resultInArg0 && out.idx == 0 {
  1208  						if !opcodeTable[v.Op].commutative {
  1209  							// Output must use the same register as input 0.
  1210  							r := register(s.f.getHome(args[0].ID).(*Register).Num)
  1211  							mask = regMask(1) << r
  1212  						} else {
  1213  							// Output must use the same register as input 0 or 1.
  1214  							r0 := register(s.f.getHome(args[0].ID).(*Register).Num)
  1215  							r1 := register(s.f.getHome(args[1].ID).(*Register).Num)
  1216  							// Check r0 and r1 for desired output register.
  1217  							found := false
  1218  							for _, r := range dinfo[idx].out {
  1219  								if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
  1220  									mask = regMask(1) << r
  1221  									found = true
  1222  									if r == r1 {
  1223  										args[0], args[1] = args[1], args[0]
  1224  									}
  1225  									break
  1226  								}
  1227  							}
  1228  							if !found {
  1229  								// Neither is desired, pick r0.
  1230  								mask = regMask(1) << r0
  1231  							}
  1232  						}
  1233  					}
  1234  					for _, r := range dinfo[idx].out {
  1235  						if r != noRegister && (mask&^s.used)>>r&1 != 0 {
  1236  							// Desired register is allowed and unused.
  1237  							mask = regMask(1) << r
  1238  							break
  1239  						}
  1240  					}
  1241  					// Avoid registers we're saving for other values.
  1242  					if mask&^desired.avoid != 0 {
  1243  						mask &^= desired.avoid
  1244  					}
  1245  					r := s.allocReg(mask, v)
  1246  					outRegs[out.idx] = r
  1247  					used |= regMask(1) << r
  1248  				}
  1249  				// Record register choices
  1250  				if v.Type.IsTuple() {
  1251  					var outLocs LocPair
  1252  					if r := outRegs[0]; r != noRegister {
  1253  						outLocs[0] = &s.registers[r]
  1254  					}
  1255  					if r := outRegs[1]; r != noRegister {
  1256  						outLocs[1] = &s.registers[r]
  1257  					}
  1258  					s.f.setHome(v, outLocs)
  1259  					// Note that subsequent SelectX instructions will do the assignReg calls.
  1260  				} else {
  1261  					if r := outRegs[0]; r != noRegister {
  1262  						s.assignReg(r, v, v)
  1263  					}
  1264  				}
  1265  			}
  1266  
  1267  			// Issue the Value itself.
  1268  			for i, a := range args {
  1269  				v.SetArg(i, a) // use register version of arguments
  1270  			}
  1271  			b.Values = append(b.Values, v)
  1272  
  1273  			// Issue a spill for this value. We issue spills unconditionally,
  1274  			// then at the end of regalloc delete the ones we never use.
  1275  			// TODO: schedule the spill at a point that dominates all restores.
  1276  			// The restore may be off in an unlikely branch somewhere and it
  1277  			// would be better to have the spill in that unlikely branch as well.
  1278  			// v := ...
  1279  			// if unlikely {
  1280  			//     f()
  1281  			// }
  1282  			// It would be good to have both spill and restore inside the IF.
  1283  		issueSpill:
  1284  			if s.values[v.ID].needReg {
  1285  				spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
  1286  				s.setOrig(spill, v)
  1287  				s.values[v.ID].spill = spill
  1288  				s.values[v.ID].spillUsed = false
  1289  				if loop != nil {
  1290  					loop.spills = append(loop.spills, v)
  1291  					nSpillsInner++
  1292  				}
  1293  				nSpills++
  1294  			}
  1295  		}
  1296  
  1297  		// Load control value into reg.
  1298  		if v := b.Control; v != nil && s.values[v.ID].needReg {
  1299  			if s.f.pass.debug > regDebug {
  1300  				fmt.Printf("  processing control %s\n", v.LongString())
  1301  			}
  1302  			// We assume that a control input can be passed in any
  1303  			// type-compatible register. If this turns out not to be true,
  1304  			// we'll need to introduce a regspec for a block's control value.
  1305  			b.Control = s.allocValToReg(v, s.compatRegs(v.Type), false, b.Line)
  1306  			// Remove this use from the uses list.
  1307  			vi := &s.values[v.ID]
  1308  			u := vi.uses
  1309  			vi.uses = u.next
  1310  			if u.next == nil {
  1311  				s.freeRegs(vi.regs) // value is dead
  1312  			}
  1313  			u.next = s.freeUseRecords
  1314  			s.freeUseRecords = u
  1315  		}
  1316  
  1317  		// Spill any values that can't live across basic block boundaries.
  1318  		if s.f.Config.use387 {
  1319  			s.freeRegs(s.f.Config.fpRegMask)
  1320  		}
  1321  
  1322  		// If we are approaching a merge point and we are the primary
  1323  		// predecessor of it, find live values that we use soon after
  1324  		// the merge point and promote them to registers now.
  1325  		if len(b.Succs) == 1 {
  1326  			// For this to be worthwhile, the loop must have no calls in it.
  1327  			top := b.Succs[0].b
  1328  			loop := s.loopnest.b2l[top.ID]
  1329  			if loop == nil || loop.header != top || loop.containsCall {
  1330  				goto badloop
  1331  			}
  1332  
  1333  			// TODO: sort by distance, pick the closest ones?
  1334  			for _, live := range s.live[b.ID] {
  1335  				if live.dist >= unlikelyDistance {
  1336  					// Don't preload anything live after the loop.
  1337  					continue
  1338  				}
  1339  				vid := live.ID
  1340  				vi := &s.values[vid]
  1341  				if vi.regs != 0 {
  1342  					continue
  1343  				}
  1344  				v := s.orig[vid]
  1345  				if s.f.Config.use387 && v.Type.IsFloat() {
  1346  					continue // 387 can't handle floats in registers between blocks
  1347  				}
  1348  				m := s.compatRegs(v.Type) &^ s.used
  1349  				if m&^desired.avoid != 0 {
  1350  					m &^= desired.avoid
  1351  				}
  1352  				if m != 0 {
  1353  					s.allocValToReg(v, m, false, b.Line)
  1354  				}
  1355  			}
  1356  		}
  1357  	badloop:
  1358  		;
  1359  
  1360  		// Save end-of-block register state.
  1361  		// First count how many, this cuts allocations in half.
  1362  		k := 0
  1363  		for r := register(0); r < s.numRegs; r++ {
  1364  			v := s.regs[r].v
  1365  			if v == nil {
  1366  				continue
  1367  			}
  1368  			k++
  1369  		}
  1370  		regList := make([]endReg, 0, k)
  1371  		for r := register(0); r < s.numRegs; r++ {
  1372  			v := s.regs[r].v
  1373  			if v == nil {
  1374  				continue
  1375  			}
  1376  			regList = append(regList, endReg{r, v, s.regs[r].c})
  1377  		}
  1378  		s.endRegs[b.ID] = regList
  1379  
  1380  		// Check. TODO: remove
  1381  		{
  1382  			liveSet.clear()
  1383  			for _, x := range s.live[b.ID] {
  1384  				liveSet.add(x.ID)
  1385  			}
  1386  			for r := register(0); r < s.numRegs; r++ {
  1387  				v := s.regs[r].v
  1388  				if v == nil {
  1389  					continue
  1390  				}
  1391  				if !liveSet.contains(v.ID) {
  1392  					s.f.Fatalf("val %s is in reg but not live at end of %s", v, b)
  1393  				}
  1394  			}
  1395  		}
  1396  
  1397  		// If a value is live at the end of the block and
  1398  		// isn't in a register, remember that its spill location
  1399  		// is live. We need to remember this information so that
  1400  		// the liveness analysis in stackalloc is correct.
  1401  		for _, e := range s.live[b.ID] {
  1402  			if s.values[e.ID].regs != 0 {
  1403  				// in a register, we'll use that source for the merge.
  1404  				continue
  1405  			}
  1406  			spill := s.values[e.ID].spill
  1407  			if spill == nil {
  1408  				// rematerializeable values will have spill==nil.
  1409  				continue
  1410  			}
  1411  			s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
  1412  			s.values[e.ID].spillUsed = true
  1413  		}
  1414  
  1415  		// Keep track of values that are spilled in the loop, but whose spill
  1416  		// is not used in the loop.  It may be possible to move ("sink") the
  1417  		// spill out of the loop into one or more exit blocks.
  1418  		if loop != nil {
  1419  			loop.scratch++                    // increment count of blocks in this loop that have been processed
  1420  			if loop.scratch == loop.nBlocks { // just processed last block of loop, if it is an inner loop.
  1421  				// This check is redundant with code at the top of the loop.
  1422  				// This is definitive; the one at the top of the loop is an optimization.
  1423  				if loop.isInner && // Common case, easier, most likely to be profitable
  1424  					!loop.containsCall && // Calls force spills, also lead to puzzling spill info.
  1425  					len(loop.exits) <= 32 { // Almost no inner loops have more than 32 exits,
  1426  					// and this allows use of a bitvector and a sparseMap.
  1427  
  1428  					// TODO: exit calculation is messed up for non-inner loops
  1429  					// because of multilevel exits that are not part of the "exit"
  1430  					// count.
  1431  
  1432  					// Compute the set of spill-movement candidates live at entry to exit blocks.
  1433  					// isLoopSpillCandidate filters for
  1434  					// (1) defined in appropriate loop
  1435  					// (2) needs a register
  1436  					// (3) spill not already used (in the loop)
  1437  					// Condition (3) === "in a register at all loop exits"
  1438  
  1439  					entryCandidates.clear()
  1440  
  1441  					for whichExit, ss := range loop.exits {
  1442  						// Start with live at end.
  1443  						for _, li := range s.live[ss.ID] {
  1444  							if s.isLoopSpillCandidate(loop, s.orig[li.ID]) {
  1445  								// s.live contains original IDs, use s.orig above to map back to *Value
  1446  								entryCandidates.setBit(li.ID, uint(whichExit))
  1447  							}
  1448  						}
  1449  						// Control can also be live.
  1450  						if ss.Control != nil && s.orig[ss.Control.ID] != nil && s.isLoopSpillCandidate(loop, s.orig[ss.Control.ID]) {
  1451  							entryCandidates.setBit(s.orig[ss.Control.ID].ID, uint(whichExit))
  1452  						}
  1453  						// Walk backwards, filling in locally live values, removing those defined.
  1454  						for i := len(ss.Values) - 1; i >= 0; i-- {
  1455  							v := ss.Values[i]
  1456  							vorig := s.orig[v.ID]
  1457  							if vorig != nil {
  1458  								entryCandidates.remove(vorig.ID) // Cannot be an issue, only keeps the sets smaller.
  1459  							}
  1460  							for _, a := range v.Args {
  1461  								aorig := s.orig[a.ID]
  1462  								if aorig != nil && s.isLoopSpillCandidate(loop, aorig) {
  1463  									entryCandidates.setBit(aorig.ID, uint(whichExit))
  1464  								}
  1465  							}
  1466  						}
  1467  					}
  1468  
  1469  					for _, e := range loop.spills {
  1470  						whichblocks := entryCandidates.get(e.ID)
  1471  						oldSpill := s.values[e.ID].spill
  1472  						if whichblocks != 0 && whichblocks != -1 { // -1 = not in map.
  1473  							toSink = append(toSink, spillToSink{spill: oldSpill, dests: whichblocks})
  1474  						}
  1475  					}
  1476  
  1477  				} // loop is inner etc
  1478  				loop.scratch = 0 // Don't leave a mess, just in case.
  1479  				loop.spills = nil
  1480  			} // if scratch == nBlocks
  1481  		} // if loop is not nil
  1482  
  1483  		// Clear any final uses.
  1484  		// All that is left should be the pseudo-uses added for values which
  1485  		// are live at the end of b.
  1486  		for _, e := range s.live[b.ID] {
  1487  			u := s.values[e.ID].uses
  1488  			if u == nil {
  1489  				f.Fatalf("live at end, no uses v%d", e.ID)
  1490  			}
  1491  			if u.next != nil {
  1492  				f.Fatalf("live at end, too many uses v%d", e.ID)
  1493  			}
  1494  			s.values[e.ID].uses = nil
  1495  			u.next = s.freeUseRecords
  1496  			s.freeUseRecords = u
  1497  		}
  1498  	}
  1499  
  1500  	// Erase any spills we never used
  1501  	for i := range s.values {
  1502  		vi := s.values[i]
  1503  		if vi.spillUsed {
  1504  			if s.f.pass.debug > logSpills {
  1505  				s.f.Config.Warnl(vi.spill.Line, "spilled value at %v remains", vi.spill)
  1506  			}
  1507  			continue
  1508  		}
  1509  		spill := vi.spill
  1510  		if spill == nil {
  1511  			// Constants, SP, SB, ...
  1512  			continue
  1513  		}
  1514  		loop := s.loopForBlock(spill.Block)
  1515  		if loop != nil {
  1516  			nSpillsInner--
  1517  		}
  1518  
  1519  		spill.Args[0].Uses--
  1520  		f.freeValue(spill)
  1521  		nSpills--
  1522  	}
  1523  
  1524  	for _, b := range f.Blocks {
  1525  		i := 0
  1526  		for _, v := range b.Values {
  1527  			if v.Op == OpInvalid {
  1528  				continue
  1529  			}
  1530  			b.Values[i] = v
  1531  			i++
  1532  		}
  1533  		b.Values = b.Values[:i]
  1534  		// TODO: zero b.Values[i:], recycle Values
  1535  		// Not important now because this is the last phase that manipulates Values
  1536  	}
  1537  
  1538  	// Must clear these out before any potential recycling, though that's
  1539  	// not currently implemented.
  1540  	for i, ts := range toSink {
  1541  		vsp := ts.spill
  1542  		if vsp.Op == OpInvalid { // This spill was completely eliminated
  1543  			toSink[i].spill = nil
  1544  		}
  1545  	}
  1546  
  1547  	// Anything that didn't get a register gets a stack location here.
  1548  	// (StoreReg, stack-based phis, inputs, ...)
  1549  	stacklive := stackalloc(s.f, s.spillLive)
  1550  
  1551  	// Fix up all merge edges.
  1552  	s.shuffle(stacklive)
  1553  
  1554  	// Insert moved spills (that have not been marked invalid above)
  1555  	// at start of appropriate block and remove the originals from their
  1556  	// location within loops.  Notice that this can break SSA form;
  1557  	// if a spill is sunk to multiple exits, there will be no phi for that
  1558  	// spill at a join point downstream of those exits, though the
  1559  	// sunk spills will all target the same stack slot.  Notice also that this
  1560  	// takes place after stack allocation, so the stack allocator does
  1561  	// not need to process these malformed flow graphs.
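        	// ts.dests below is a bitmask over loop.exits: bit i set means the spill
        	// should be re-emitted at loop.exits[i] (see entryCandidates above).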
  1562  sinking:
  1563  	for _, ts := range toSink {
  1564  		vsp := ts.spill
  1565  		if vsp == nil { // This spill was completely eliminated
  1566  			nSpillsSunkUnused++
  1567  			continue sinking
  1568  		}
  1569  		e := ts.spilledValue()
  1570  		if s.values[e.ID].spillUsedShuffle {
  1571  			nSpillsNotSunkLateUse++
  1572  			continue sinking
  1573  		}
  1574  
  1575  		// Move the spill to a better (outside-of-loop) block.
  1576  		// This would be costly if it occurred very often, but it doesn't.
  1577  		b := vsp.Block
  1578  		loop := s.loopnest.b2l[b.ID]
  1579  		dests := ts.dests
  1580  
  1581  		// Pre-check that the spilled value is still in its expected register at every exit where it is live.
  1582  	check_val_still_in_reg:
  1583  		for i := uint(0); i < 32 && dests != 0; i++ {
  1584  
  1585  			if dests&(1<<i) == 0 {
  1586  				continue
  1587  			}
  1588  			dests ^= 1 << i
  1589  			d := loop.exits[i]
  1590  			if len(d.Preds) > 1 {
  1591  				panic("Should be impossible given critical edges removed")
  1592  			}
  1593  			p := d.Preds[0].b // block in loop exiting to d.
  1594  
  1595  			endregs := s.endRegs[p.ID]
  1596  			for _, regrec := range endregs {
  1597  				if regrec.v == e && regrec.r != noRegister && regrec.c == e { // TODO: regrec.c != e implies different spill possible.
  1598  					continue check_val_still_in_reg
  1599  				}
  1600  			}
  1601  			// If we get here, the register assignment was lost at one or more exits, so this spill can't be sunk.
  1602  			if s.f.pass.debug > moveSpills {
  1603  				s.f.Config.Warnl(e.Line, "lost register assignment for spill %v in %v at exit %v to %v",
  1604  					vsp, b, p, d)
  1605  			}
  1606  			nSpillsChanged++
  1607  			continue sinking
  1608  		}
  1609  
  1610  		nSpillsSunk++
  1611  		nSpillsInner--
  1612  		// Don't update nSpills; the spill is only moved, and even if it is duplicated, the number of spills along any path does not increase.
  1613  
  1614  		dests = ts.dests
  1615  
  1616  		// remove vsp from b.Values
  1617  		i := 0
  1618  		for _, w := range b.Values {
  1619  			if vsp == w {
  1620  				continue
  1621  			}
  1622  			b.Values[i] = w
  1623  			i++
  1624  		}
  1625  		b.Values = b.Values[:i]
  1626  
  1627  		first := true
  1628  		for i := uint(0); i < 32 && dests != 0; i++ {
  1629  
  1630  			if dests&(1<<i) == 0 {
  1631  				continue
  1632  			}
  1633  
  1634  			dests ^= 1 << i
  1635  
  1636  			d := loop.exits[i]
  1637  			vspnew := vsp // reuse original for first sunk spill, saves tracking down and renaming uses
  1638  			if !first {   // any sunk spills after first must make a copy
  1639  				vspnew = d.NewValue1(e.Line, OpStoreReg, e.Type, e)
  1640  				f.setHome(vspnew, f.getHome(vsp.ID)) // copy stack home
  1641  				if s.f.pass.debug > moveSpills {
  1642  					s.f.Config.Warnl(e.Line, "copied spill %v in %v for %v to %v in %v",
  1643  						vsp, b, e, vspnew, d)
  1644  				}
  1645  			} else {
  1646  				first = false
  1647  				vspnew.Block = d
  1648  				d.Values = append(d.Values, vspnew)
  1649  				if s.f.pass.debug > moveSpills {
  1650  					s.f.Config.Warnl(e.Line, "moved spill %v in %v for %v to %v in %v",
  1651  						vsp, b, e, vspnew, d)
  1652  				}
  1653  			}
  1654  
  1655  			// shuffle vspnew to the beginning of its block
  1656  			copy(d.Values[1:], d.Values[0:len(d.Values)-1])
  1657  			d.Values[0] = vspnew
  1658  
  1659  		}
  1660  	}
  1661  
  1662  	if f.pass.stats > 0 {
  1663  		f.LogStat("spills_info",
  1664  			nSpills, "spills", nSpillsInner, "inner_spills_remaining", nSpillsSunk, "inner_spills_sunk", nSpillsSunkUnused, "inner_spills_unused", nSpillsNotSunkLateUse, "inner_spills_shuffled", nSpillsChanged, "inner_spills_changed")
  1665  	}
  1666  }
  1667  
  1668  // isLoopSpillCandidate indicates whether the spill for v satisfies preliminary
  1669  // spill-sinking conditions just after the last block of loop has been processed.
  1670  // In particular:
  1671  //   v needs a register.
  1672  //   v's spill is not (YET) used.
  1673  //   v's definition is within loop.
  1674  // The spill may be used in the future, either by an outright use
  1675  // in the code, or by shuffling code inserted after stack allocation.
  1676  // Outright uses cause sinking; shuffling (within the loop) inhibits it.
  1677  func (s *regAllocState) isLoopSpillCandidate(loop *loop, v *Value) bool {
  1678  	return s.values[v.ID].needReg && !s.values[v.ID].spillUsed && s.loopnest.b2l[v.Block.ID] == loop
  1679  }
  1680  
  1681  // lateSpillUse notes a late (after stack allocation) use of the spill of value with ID vid.
  1682  // This will inhibit spill sinking.
  1683  func (s *regAllocState) lateSpillUse(vid ID) {
  1684  	// TODO investigate why this is necessary.
  1685  	// It appears that an outside-the-loop use of
  1686  	// an otherwise sinkable spill makes the spill
  1687  	// a candidate for shuffling, when it would not
  1688  	// otherwise have been the case (spillUsed was not
  1689  	// true when isLoopSpillCandidate was called, yet
  1690  	// it was shuffled).  Such shuffling cuts the amount
  1691  	// of spill sinking by more than half (in make.bash).
  1692  	s.values[vid].spillUsedShuffle = true
  1693  }
  1694  
  1695  // shuffle fixes up all the merge edges (those going into blocks of indegree > 1).
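        // A hedged illustration (register and value names invented): if predecessor
        // p ends with v5 in AX but successor b expects v5 in CX at entry, processDest
        // emits an OpCopy in p; if b instead expects v5 in a stack slot, it emits an
        // OpStoreReg; and if v5 is only available in a spill slot but is wanted in a
        // register, it emits an OpLoadReg.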
  1696  func (s *regAllocState) shuffle(stacklive [][]ID) {
  1697  	var e edgeState
  1698  	e.s = s
  1699  	e.cache = map[ID][]*Value{}
  1700  	e.contents = map[Location]contentRecord{}
  1701  	if s.f.pass.debug > regDebug {
  1702  		fmt.Printf("shuffle %s\n", s.f.Name)
  1703  		fmt.Println(s.f.String())
  1704  	}
  1705  
  1706  	for _, b := range s.f.Blocks {
  1707  		if len(b.Preds) <= 1 {
  1708  			continue
  1709  		}
  1710  		e.b = b
  1711  		for i, edge := range b.Preds {
  1712  			p := edge.b
  1713  			e.p = p
  1714  			e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID])
  1715  			e.process()
  1716  		}
  1717  	}
  1718  }
  1719  
  1720  type edgeState struct {
  1721  	s    *regAllocState
  1722  	p, b *Block // edge goes from p->b.
  1723  
  1724  	// for each pre-regalloc value, a list of equivalent cached values
  1725  	cache      map[ID][]*Value
  1726  	cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
  1727  
  1728  	// map from location to the value it contains
  1729  	contents map[Location]contentRecord
  1730  
  1731  	// desired destination locations
  1732  	destinations []dstRecord
  1733  	extra        []dstRecord
  1734  
  1735  	usedRegs   regMask // registers currently holding something
  1736  	uniqueRegs regMask // registers holding the only copy of a value
  1737  	finalRegs  regMask // registers holding final target
  1738  }
  1739  
  1740  type contentRecord struct {
  1741  	vid   ID     // pre-regalloc value
  1742  	c     *Value // cached value
  1743  	final bool   // this is a satisfied destination
  1744  }
  1745  
  1746  type dstRecord struct {
  1747  	loc    Location // register or stack slot
  1748  	vid    ID       // pre-regalloc value it should contain
  1749  	splice **Value  // place to store reference to the generating instruction
  1750  }
  1751  
  1752  // setup initializes the edge state for shuffling.
  1753  func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) {
  1754  	if e.s.f.pass.debug > regDebug {
  1755  		fmt.Printf("edge %s->%s\n", e.p, e.b)
  1756  	}
  1757  
  1758  	// Clear state.
  1759  	for _, vid := range e.cachedVals {
  1760  		delete(e.cache, vid)
  1761  	}
  1762  	e.cachedVals = e.cachedVals[:0]
  1763  	for k := range e.contents {
  1764  		delete(e.contents, k)
  1765  	}
  1766  	e.usedRegs = 0
  1767  	e.uniqueRegs = 0
  1768  	e.finalRegs = 0
  1769  
  1770  	// Live registers can be sources.
  1771  	for _, x := range srcReg {
  1772  		e.set(&e.s.registers[x.r], x.v.ID, x.c, false)
  1773  	}
  1774  	// So can all of the spill locations.
  1775  	for _, spillID := range stacklive {
  1776  		v := e.s.orig[spillID]
  1777  		spill := e.s.values[v.ID].spill
  1778  		e.set(e.s.f.getHome(spillID), v.ID, spill, false)
  1779  	}
  1780  
  1781  	// Figure out all the destinations we need.
  1782  	dsts := e.destinations[:0]
  1783  	for _, x := range dstReg {
  1784  		dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.vid, nil})
  1785  	}
  1786  	// Phis need their args to end up in a specific location.
  1787  	for _, v := range e.b.Values {
  1788  		if v.Op != OpPhi {
  1789  			break
  1790  		}
  1791  		loc := e.s.f.getHome(v.ID)
  1792  		if loc == nil {
  1793  			continue
  1794  		}
  1795  		dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx]})
  1796  	}
  1797  	e.destinations = dsts
  1798  
  1799  	if e.s.f.pass.debug > regDebug {
  1800  		for _, vid := range e.cachedVals {
  1801  			a := e.cache[vid]
  1802  			for _, c := range a {
  1803  				fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID).Name(), vid, c)
  1804  			}
  1805  		}
  1806  		for _, d := range e.destinations {
  1807  			fmt.Printf("dst %s: v%d\n", d.loc.Name(), d.vid)
  1808  		}
  1809  	}
  1810  }
  1811  
  1812  // process generates code to move all the values to the right destination locations.
  1813  func (e *edgeState) process() {
  1814  	dsts := e.destinations
  1815  
  1816  	// Process the destinations until they are all satisfied.
  1817  	for len(dsts) > 0 {
  1818  		i := 0
  1819  		for _, d := range dsts {
  1820  			if !e.processDest(d.loc, d.vid, d.splice) {
  1821  				// Failed - save for next iteration.
  1822  				dsts[i] = d
  1823  				i++
  1824  			}
  1825  		}
  1826  		if i < len(dsts) {
  1827  			// Made some progress. Go around again.
  1828  			dsts = dsts[:i]
  1829  
  1830  			// Append any extra destinations we generated.
  1831  			dsts = append(dsts, e.extra...)
  1832  			e.extra = e.extra[:0]
  1833  			continue
  1834  		}
  1835  
  1836  		// We made no progress. That means that any
  1837  		// remaining unsatisfied moves are in simple cycles.
  1838  		// For example, A -> B -> C -> D -> A.
  1839  		//   A ----> B
  1840  		//   ^       |
  1841  		//   |       |
  1842  		//   |       v
  1843  		//   D <---- C
  1844  
  1845  		// To break the cycle, we pick an unused register, say R,
  1846  		// and put a copy of B there.
  1847  		//   A ----> B
  1848  		//   ^       |
  1849  		//   |       |
  1850  		//   |       v
  1851  		//   D <---- C <---- R=copyofB
  1852  		// When we resume the outer loop, the A->B move can now proceed,
  1853  		// and eventually the whole cycle completes.
  1854  
  1855  		// Copy any cycle location to a temp register. This duplicates
  1856  		// one of the cycle entries, allowing the just duplicated value
  1857  		// to be overwritten and the cycle to proceed.
  1858  		loc := dsts[0].loc
  1859  		vid := e.contents[loc].vid
  1860  		c := e.contents[loc].c
  1861  		r := e.findRegFor(c.Type)
  1862  		if e.s.f.pass.debug > regDebug {
  1863  			fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc.Name(), c)
  1864  		}
  1865  		if _, isReg := loc.(*Register); isReg {
  1866  			c = e.p.NewValue1(c.Line, OpCopy, c.Type, c)
  1867  		} else {
  1868  			e.s.lateSpillUse(vid)
  1869  			c = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c)
  1870  		}
  1871  		e.set(r, vid, c, false)
  1872  	}
  1873  }
  1874  
  1875  // processDest generates code to put value vid into location loc. Returns true
  1876  // if progress was made.
  1877  func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
  1878  	occupant := e.contents[loc]
  1879  	if occupant.vid == vid {
  1880  		// Value is already in the correct place.
  1881  		e.contents[loc] = contentRecord{vid, occupant.c, true}
  1882  		if splice != nil {
  1883  			(*splice).Uses--
  1884  			*splice = occupant.c
  1885  			occupant.c.Uses++
  1886  			if occupant.c.Op == OpStoreReg {
  1887  				e.s.lateSpillUse(vid)
  1888  			}
  1889  		}
  1890  		// Note: if splice==nil then occupant.c will appear dead. This code
  1891  		// is no longer in SSA form, so be careful after this pass not to run
  1892  		// deadcode elimination.
  1893  		return true
  1894  	}
  1895  
  1896  	// Check if we're allowed to clobber the destination location.
  1897  	if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable {
  1898  		// We can't overwrite the last copy
  1899  		// of a value that needs to survive.
  1900  		return false
  1901  	}
  1902  
  1903  	// Copy from a source of v, register preferred.
  1904  	v := e.s.orig[vid]
  1905  	var c *Value
  1906  	var src Location
  1907  	if e.s.f.pass.debug > regDebug {
  1908  		fmt.Printf("moving v%d to %s\n", vid, loc.Name())
  1909  		fmt.Printf("sources of v%d:", vid)
  1910  	}
  1911  	for _, w := range e.cache[vid] {
  1912  		h := e.s.f.getHome(w.ID)
  1913  		if e.s.f.pass.debug > regDebug {
  1914  			fmt.Printf(" %s:%s", h.Name(), w)
  1915  		}
  1916  		_, isreg := h.(*Register)
  1917  		if src == nil || isreg {
  1918  			c = w
  1919  			src = h
  1920  		}
  1921  	}
  1922  	if e.s.f.pass.debug > regDebug {
  1923  		if src != nil {
  1924  			fmt.Printf(" [use %s]\n", src.Name())
  1925  		} else {
  1926  			fmt.Printf(" [no source]\n")
  1927  		}
  1928  	}
  1929  	_, dstReg := loc.(*Register)
  1930  	var x *Value
  1931  	if c == nil {
  1932  		if !e.s.values[vid].rematerializeable {
  1933  			e.s.f.Fatalf("can't find source for %s->%s: v%d\n", e.p, e.b, vid)
  1934  		}
  1935  		if dstReg {
  1936  			x = v.copyInto(e.p)
  1937  		} else {
  1938  			// Rematerialize into stack slot. Need a free
  1939  			// register to accomplish this.
  1940  			e.erase(loc) // see pre-clobber comment below
  1941  			r := e.findRegFor(v.Type)
  1942  			x = v.copyInto(e.p)
  1943  			e.set(r, vid, x, false)
  1944  			// Make sure we spill with the size of the slot, not the
  1945  			// size of x (which might be wider due to our dropping
  1946  			// of narrowing conversions).
  1947  			x = e.p.NewValue1(x.Line, OpStoreReg, loc.(LocalSlot).Type, x)
  1948  		}
  1949  	} else {
  1950  		// Emit move from src to dst.
  1951  		_, srcReg := src.(*Register)
  1952  		if srcReg {
  1953  			if dstReg {
  1954  				x = e.p.NewValue1(c.Line, OpCopy, c.Type, c)
  1955  			} else {
  1956  				x = e.p.NewValue1(c.Line, OpStoreReg, loc.(LocalSlot).Type, c)
  1957  			}
  1958  		} else {
  1959  			if dstReg {
  1960  				e.s.lateSpillUse(vid)
  1961  				x = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c)
  1962  			} else {
  1963  				// mem->mem. Use temp register.
  1964  
  1965  				// Pre-clobber destination. This avoids the
  1966  				// following situation:
  1967  				//   - v is currently held in R0 and stacktmp0.
  1968  				//   - We want to copy stacktmp1 to stacktmp0.
  1969  				//   - We choose R0 as the temporary register.
  1970  				// During the copy, both R0 and stacktmp0 are
  1971  				// clobbered, losing both copies of v. Oops!
  1972  				// Erasing the destination early means R0 will not
  1973  				// be chosen as the temp register, as it will then
  1974  				// be the last copy of v.
  1975  				e.erase(loc)
  1976  
  1977  				r := e.findRegFor(c.Type)
  1978  				e.s.lateSpillUse(vid)
  1979  				t := e.p.NewValue1(c.Line, OpLoadReg, c.Type, c)
  1980  				e.set(r, vid, t, false)
  1981  				x = e.p.NewValue1(c.Line, OpStoreReg, loc.(LocalSlot).Type, t)
  1982  			}
  1983  		}
  1984  	}
  1985  	e.set(loc, vid, x, true)
  1986  	if splice != nil {
  1987  		(*splice).Uses--
  1988  		*splice = x
  1989  		x.Uses++
  1990  	}
  1991  	return true
  1992  }
  1993  
  1994  // set changes the contents of location loc to hold the given value and its cached representative.
  1995  func (e *edgeState) set(loc Location, vid ID, c *Value, final bool) {
  1996  	e.s.f.setHome(c, loc)
  1997  	e.erase(loc)
  1998  	e.contents[loc] = contentRecord{vid, c, final}
  1999  	a := e.cache[vid]
  2000  	if len(a) == 0 {
  2001  		e.cachedVals = append(e.cachedVals, vid)
  2002  	}
  2003  	a = append(a, c)
  2004  	e.cache[vid] = a
  2005  	if r, ok := loc.(*Register); ok {
  2006  		e.usedRegs |= regMask(1) << uint(r.Num)
  2007  		if final {
  2008  			e.finalRegs |= regMask(1) << uint(r.Num)
  2009  		}
  2010  		if len(a) == 1 {
  2011  			e.uniqueRegs |= regMask(1) << uint(r.Num)
  2012  		}
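        		// A second cached copy of vid means the earlier copy's register,
        		// if it was in one, no longer holds a unique copy.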
  2013  		if len(a) == 2 {
  2014  			if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
  2015  				e.uniqueRegs &^= regMask(1) << uint(t.Num)
  2016  			}
  2017  		}
  2018  	}
  2019  	if e.s.f.pass.debug > regDebug {
  2020  		fmt.Printf("%s\n", c.LongString())
  2021  		fmt.Printf("v%d now available in %s:%s\n", vid, loc.Name(), c)
  2022  	}
  2023  }
  2024  
  2025  // erase removes the cached copy of whatever value currently occupies loc.
  2026  func (e *edgeState) erase(loc Location) {
  2027  	cr := e.contents[loc]
  2028  	if cr.c == nil {
  2029  		return
  2030  	}
  2031  	vid := cr.vid
  2032  
  2033  	if cr.final {
  2034  		// Add a destination to move this value back into place.
  2035  		// Make sure it gets added to the tail of the destination queue
  2036  		// so we make progress on other moves first.
  2037  		e.extra = append(e.extra, dstRecord{loc, cr.vid, nil})
  2038  	}
  2039  
  2040  	// Remove c from the list of cached values.
  2041  	a := e.cache[vid]
  2042  	for i, c := range a {
  2043  		if e.s.f.getHome(c.ID) == loc {
  2044  			if e.s.f.pass.debug > regDebug {
  2045  				fmt.Printf("v%d no longer available in %s:%s\n", vid, loc.Name(), c)
  2046  			}
  2047  			a[i], a = a[len(a)-1], a[:len(a)-1]
  2048  			break
  2049  		}
  2050  	}
  2051  	e.cache[vid] = a
  2052  
  2053  	// Update register masks.
  2054  	if r, ok := loc.(*Register); ok {
  2055  		e.usedRegs &^= regMask(1) << uint(r.Num)
  2056  		if cr.final {
  2057  			e.finalRegs &^= regMask(1) << uint(r.Num)
  2058  		}
  2059  	}
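        	// If exactly one cached copy of vid remains and it lives in a register,
        	// that register now holds the only copy.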
  2060  	if len(a) == 1 {
  2061  		if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
  2062  			e.uniqueRegs |= regMask(1) << uint(r.Num)
  2063  		}
  2064  	}
  2065  }
  2066  
  2067  // findRegFor finds a register we can use to make a temp copy of type typ.
  2068  func (e *edgeState) findRegFor(typ Type) Location {
  2069  	// Which registers are possibilities.
  2070  	var m regMask
  2071  	if typ.IsFloat() {
  2072  		m = e.s.compatRegs(e.s.f.Config.fe.TypeFloat64())
  2073  	} else {
  2074  		m = e.s.compatRegs(e.s.f.Config.fe.TypeInt64())
  2075  	}
  2076  
  2077  	// Pick a register. In priority order:
  2078  	// 1) an unused register
  2079  	// 2) a non-unique register not holding a final value
  2080  	// 3) a non-unique register
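        	// (A "unique" register holds the only copy of some value; taking one
        	// would force the spill-to-temp fallback below, so it is a last resort.)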
  2081  	x := m &^ e.usedRegs
  2082  	if x != 0 {
  2083  		return &e.s.registers[pickReg(x)]
  2084  	}
  2085  	x = m &^ e.uniqueRegs &^ e.finalRegs
  2086  	if x != 0 {
  2087  		return &e.s.registers[pickReg(x)]
  2088  	}
  2089  	x = m &^ e.uniqueRegs
  2090  	if x != 0 {
  2091  		return &e.s.registers[pickReg(x)]
  2092  	}
  2093  
  2094  	// No register is available. Allocate a temp location to spill a register to.
  2095  	// The type of the slot is immaterial - it will not be live across
  2096  	// any safepoint. Just use a type big enough to hold any register.
  2097  	typ = e.s.f.Config.fe.TypeInt64()
  2098  	t := LocalSlot{e.s.f.Config.fe.Auto(typ), typ, 0}
  2099  	// TODO: reuse these slots.
  2100  
  2101  	// Pick a register to spill.
  2102  	for _, vid := range e.cachedVals {
  2103  		a := e.cache[vid]
  2104  		for _, c := range a {
  2105  			if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.Num)&1 != 0 {
  2106  				x := e.p.NewValue1(c.Line, OpStoreReg, c.Type, c)
  2107  				e.set(t, vid, x, false)
  2108  				if e.s.f.pass.debug > regDebug {
  2109  					fmt.Printf("  SPILL %s->%s %s\n", r.Name(), t.Name(), x.LongString())
  2110  				}
  2111  				// r will now be overwritten by the caller. At some point
  2112  				// later, the newly saved value will be moved back to its
  2113  				// final destination in processDest.
  2114  				return r
  2115  			}
  2116  		}
  2117  	}
  2118  
  2119  	fmt.Printf("m:%d unique:%d final:%d\n", m, e.uniqueRegs, e.finalRegs)
  2120  	for _, vid := range e.cachedVals {
  2121  		a := e.cache[vid]
  2122  		for _, c := range a {
  2123  			fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID).Name())
  2124  		}
  2125  	}
  2126  	e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b)
  2127  	return nil
  2128  }
  2129  
  2130  // rematerializeable reports whether the register allocator should recompute
  2131  // a value instead of spilling/restoring it.
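// Typical examples (depending on what the opcode table marks as
// rematerializeable) are constants and address computations off SP/SB,
// which are cheaper to recompute than to reload from a spill slot.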
  2132  func (v *Value) rematerializeable() bool {
  2133  	if !opcodeTable[v.Op].rematerializeable {
  2134  		return false
  2135  	}
  2136  	for _, a := range v.Args {
  2137  		// SP and SB (generated by OpSP and OpSB) are always available.
  2138  		if a.Op != OpSP && a.Op != OpSB {
  2139  			return false
  2140  		}
  2141  	}
  2142  	return true
  2143  }
  2144  
  2145  type liveInfo struct {
  2146  	ID   ID    // ID of value
  2147  	dist int32 // # of instructions before next use
  2148  }
  2149  
  2150  // dblock contains information about desired & avoid registers at the end of a block.
  2151  type dblock struct {
  2152  	prefers []desiredStateEntry
  2153  	avoid   regMask
  2154  }
  2155  
  2156  // computeLive computes a map from block ID to a list of value IDs live at the end
  2157  // of that block. Together with the value ID is a count of how many instructions
  2158  // to the next use of that value. The resulting map is stored in s.live.
  2159  // computeLive also computes the desired register information at the end of each block.
  2160  // This desired register information is stored in s.desired.
  2161  // TODO: this could be quadratic if lots of variables are live across lots of
  2162  // basic blocks. Figure out a way to make this function (or, more precisely, the user
  2163  // of this function) require only linear size & time.
  2164  func (s *regAllocState) computeLive() {
  2165  	f := s.f
  2166  	s.live = make([][]liveInfo, f.NumBlocks())
  2167  	s.desired = make([]desiredState, f.NumBlocks())
  2168  	var phis []*Value
  2169  
  2170  	live := newSparseMap(f.NumValues())
  2171  	t := newSparseMap(f.NumValues())
  2172  
  2173  	// Keep track of which value we want in each register.
  2174  	var desired desiredState
  2175  
  2176  	// Instead of iterating over f.Blocks, iterate over their postordering.
  2177  	// Liveness information flows backward, so starting at the end
  2178  	// increases the probability that we will stabilize quickly.
  2179  	// TODO: Do a better job yet. Here's one possibility:
  2180  	// Calculate the dominator tree and locate all strongly connected components.
  2181  	// If a value is live in one block of an SCC, it is live in all.
  2182  	// Walk the dominator tree from end to beginning, just once, treating SCC
  2183  // components as single blocks, duplicating calculated liveness information
  2184  	// out to all of them.
  2185  	s.loopnest = loopnestfor(f)
  2186  	po := s.loopnest.po
  2187  	for {
  2188  		changed := false
  2189  
  2190  		for _, b := range po {
  2191  			// Start with known live values at the end of the block.
  2192  			// Add len(b.Values) to adjust from end-of-block distance
  2193  			// to beginning-of-block distance.
  2194  			live.clear()
  2195  			d := int32(len(b.Values))
  2196  			if b.Kind == BlockCall || b.Kind == BlockDefer {
  2197  				// Because we keep no values in registers across a call,
  2198  				// make every use past a call appear very far away.
  2199  				d += unlikelyDistance
  2200  			}
  2201  			for _, e := range s.live[b.ID] {
  2202  				live.set(e.ID, e.dist+d)
  2203  			}
  2204  
  2205  			// Mark control value as live
  2206  			if b.Control != nil && s.values[b.Control.ID].needReg {
  2207  				live.set(b.Control.ID, int32(len(b.Values)))
  2208  			}
  2209  
  2210  			// Propagate backwards to the start of the block
  2211  			// Assumes Values have been scheduled.
  2212  			phis = phis[:0]
  2213  			for i := len(b.Values) - 1; i >= 0; i-- {
  2214  				v := b.Values[i]
  2215  				live.remove(v.ID)
  2216  				if v.Op == OpPhi {
  2217  					// save phi ops for later
  2218  					phis = append(phis, v)
  2219  					continue
  2220  				}
  2221  				for _, a := range v.Args {
  2222  					if s.values[a.ID].needReg {
  2223  						live.set(a.ID, int32(i))
  2224  					}
  2225  				}
  2226  			}
  2227  			// Propagate desired registers backwards.
  2228  			desired.copy(&s.desired[b.ID])
  2229  			for i := len(b.Values) - 1; i >= 0; i-- {
  2230  				v := b.Values[i]
  2231  				prefs := desired.remove(v.ID)
  2232  				if v.Op == OpPhi {
  2233  					// TODO: if v is a phi, save desired register for phi inputs.
  2234  					// For now, we just drop it and don't propagate
  2235  					// desired registers back through phi nodes.
  2236  					continue
  2237  				}
  2238  				// Cancel desired registers if they get clobbered.
  2239  				desired.clobber(opcodeTable[v.Op].reg.clobbers)
  2240  				// Update desired registers if there are any fixed register inputs.
  2241  				for _, j := range opcodeTable[v.Op].reg.inputs {
  2242  					if countRegs(j.regs) != 1 {
  2243  						continue
  2244  					}
  2245  					desired.clobber(j.regs)
  2246  					desired.add(v.Args[j.idx].ID, pickReg(j.regs))
  2247  				}
  2248  				// Set desired register of input 0 if this is a 2-operand instruction.
  2249  				if opcodeTable[v.Op].resultInArg0 {
  2250  					if opcodeTable[v.Op].commutative {
  2251  						desired.addList(v.Args[1].ID, prefs)
  2252  					}
  2253  					desired.addList(v.Args[0].ID, prefs)
  2254  				}
  2255  			}
  2256  
  2257  			// For each predecessor of b, expand its list of live-at-end values.
  2258  			// invariant: live contains the values live at the start of b (excluding phi inputs)
  2259  			for i, e := range b.Preds {
  2260  				p := e.b
  2261  				// Compute additional distance for the edge.
  2262  				// Note: delta must be at least 1 to distinguish the control
  2263  				// value use from the first user in a successor block.
  2264  				delta := int32(normalDistance)
  2265  				if len(p.Succs) == 2 {
  2266  					if p.Succs[0].b == b && p.Likely == BranchLikely ||
  2267  						p.Succs[1].b == b && p.Likely == BranchUnlikely {
  2268  						delta = likelyDistance
  2269  					}
  2270  					if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
  2271  						p.Succs[1].b == b && p.Likely == BranchLikely {
  2272  						delta = unlikelyDistance
  2273  					}
  2274  				}
  2275  
  2276  				// Update any desired registers at the end of p.
  2277  				s.desired[p.ID].merge(&desired)
  2278  
  2279  				// Start t off with the previously known live values at the end of p.
  2280  				t.clear()
  2281  				for _, e := range s.live[p.ID] {
  2282  					t.set(e.ID, e.dist)
  2283  				}
  2284  				update := false
  2285  
  2286  				// Add new live values from scanning this block.
  2287  				for _, e := range live.contents() {
  2288  					d := e.val + delta
  2289  					if !t.contains(e.key) || d < t.get(e.key) {
  2290  						update = true
  2291  						t.set(e.key, d)
  2292  					}
  2293  				}
  2294  				// Also add the correct arg from the saved phi values.
  2295  				// All phis are at distance delta (we consider them
  2296  				// simultaneously happening at the start of the block).
  2297  				for _, v := range phis {
  2298  					id := v.Args[i].ID
  2299  					if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
  2300  						update = true
  2301  						t.set(id, delta)
  2302  					}
  2303  				}
  2304  
  2305  				if !update {
  2306  					continue
  2307  				}
  2308  				// The live set has changed, update it.
  2309  				l := s.live[p.ID][:0]
  2310  				if cap(l) < t.size() {
  2311  					l = make([]liveInfo, 0, t.size())
  2312  				}
  2313  				for _, e := range t.contents() {
  2314  					l = append(l, liveInfo{e.key, e.val})
  2315  				}
  2316  				s.live[p.ID] = l
  2317  				changed = true
  2318  			}
  2319  		}
  2320  
  2321  		if !changed {
  2322  			break
  2323  		}
  2324  	}
  2325  	if f.pass.debug > regDebug {
  2326  		fmt.Println("live values at end of each block")
  2327  		for _, b := range f.Blocks {
  2328  			fmt.Printf("  %s:", b)
  2329  			for _, x := range s.live[b.ID] {
  2330  				fmt.Printf(" v%d", x.ID)
  2331  				for _, e := range s.desired[b.ID].entries {
  2332  					if e.ID != x.ID {
  2333  						continue
  2334  					}
  2335  					fmt.Printf("[")
  2336  					first := true
  2337  					for _, r := range e.regs {
  2338  						if r == noRegister {
  2339  							continue
  2340  						}
  2341  						if !first {
  2342  							fmt.Printf(",")
  2343  						}
  2344  						fmt.Print(s.registers[r].Name())
  2345  						first = false
  2346  					}
  2347  					fmt.Printf("]")
  2348  				}
  2349  			}
  2350  			fmt.Printf(" avoid=%x", int64(s.desired[b.ID].avoid))
  2351  			fmt.Println()
  2352  		}
  2353  	}
  2354  }
  2355  
  2356  // A desiredState represents desired register assignments.
  2357  type desiredState struct {
  2358  	// Desired assignments will be small, so we just use a list
  2359  	// of valueID+registers entries.
  2360  	entries []desiredStateEntry
  2361  	// Registers that other values want to be in.  This value will
  2362  	// contain at least the union of the regs fields of entries, but
  2363  	// may also have bits set for registers desired by values that were
  2364  	// once in this data structure but have since been removed.
  2365  	avoid regMask
  2366  }
  2367  type desiredStateEntry struct {
  2368  	// (pre-regalloc) value
  2369  	ID ID
  2370  	// Registers it would like to be in, in priority order.
  2371  	// Unused slots are filled with noRegister.
  2372  	regs [4]register
  2373  }
  2374  
  2375  func (d *desiredState) clear() {
  2376  	d.entries = d.entries[:0]
  2377  	d.avoid = 0
  2378  }
  2379  
  2380  // get returns a list of desired registers for value vid.
  2381  func (d *desiredState) get(vid ID) [4]register {
  2382  	for _, e := range d.entries {
  2383  		if e.ID == vid {
  2384  			return e.regs
  2385  		}
  2386  	}
  2387  	return [4]register{noRegister, noRegister, noRegister, noRegister}
  2388  }
  2389  
  2390  // add records that we'd like value vid to be in register r.
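// The new register becomes the highest-priority choice; existing choices
// shift down and the lowest one falls off if all four slots are full.
// For example (illustrative register names, "-" denoting noRegister):
// starting from [AX BX - -], add(CX) gives [CX AX BX -], and a subsequent
// add(BX) gives [BX CX AX -].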
  2391  func (d *desiredState) add(vid ID, r register) {
  2392  	d.avoid |= regMask(1) << r
  2393  	for i := range d.entries {
  2394  		e := &d.entries[i]
  2395  		if e.ID != vid {
  2396  			continue
  2397  		}
  2398  		if e.regs[0] == r {
  2399  			// Already known and highest priority
  2400  			return
  2401  		}
  2402  		for j := 1; j < len(e.regs); j++ {
  2403  			if e.regs[j] == r {
  2404  				// Move from lower priority to top priority
  2405  				copy(e.regs[1:], e.regs[:j])
  2406  				e.regs[0] = r
  2407  				return
  2408  			}
  2409  		}
  2410  		copy(e.regs[1:], e.regs[:])
  2411  		e.regs[0] = r
  2412  		return
  2413  	}
  2414  	d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}})
  2415  }
  2416  
  2417  func (d *desiredState) addList(vid ID, regs [4]register) {
  2418  	// regs is in priority order, so iterate in reverse order.
  2419  	for i := len(regs) - 1; i >= 0; i-- {
  2420  		r := regs[i]
  2421  		if r != noRegister {
  2422  			d.add(vid, r)
  2423  		}
  2424  	}
  2425  }
  2426  
  2427  // clobber erases any desired registers in the set m.
  2428  func (d *desiredState) clobber(m regMask) {
  2429  	for i := 0; i < len(d.entries); {
  2430  		e := &d.entries[i]
  2431  		j := 0
  2432  		for _, r := range e.regs {
  2433  			if r != noRegister && m>>r&1 == 0 {
  2434  				e.regs[j] = r
  2435  				j++
  2436  			}
  2437  		}
  2438  		if j == 0 {
  2439  			// No more desired registers for this value.
  2440  			d.entries[i] = d.entries[len(d.entries)-1]
  2441  			d.entries = d.entries[:len(d.entries)-1]
  2442  			continue
  2443  		}
  2444  		for ; j < len(e.regs); j++ {
  2445  			e.regs[j] = noRegister
  2446  		}
  2447  		i++
  2448  	}
  2449  	d.avoid &^= m
  2450  }
  2451  
  2452  // copy copies a desired state from another desiredState x.
  2453  func (d *desiredState) copy(x *desiredState) {
  2454  	d.entries = append(d.entries[:0], x.entries...)
  2455  	d.avoid = x.avoid
  2456  }
  2457  
  2458  // remove removes the desired registers for vid and returns them.
  2459  func (d *desiredState) remove(vid ID) [4]register {
  2460  	for i := range d.entries {
  2461  		if d.entries[i].ID == vid {
  2462  			regs := d.entries[i].regs
  2463  			d.entries[i] = d.entries[len(d.entries)-1]
  2464  			d.entries = d.entries[:len(d.entries)-1]
  2465  			return regs
  2466  		}
  2467  	}
  2468  	return [4]register{noRegister, noRegister, noRegister, noRegister}
  2469  }
  2470  
  2471  // merge merges another desired state x into d.
  2472  func (d *desiredState) merge(x *desiredState) {
  2473  	d.avoid |= x.avoid
  2474  	// There should only be a few desired registers, so
  2475  	// linear insert is ok.
  2476  	for _, e := range x.entries {
  2477  		d.addList(e.ID, e.regs)
  2478  	}
  2479  }