github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/compare/compare.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package compare contains code for generating comparison
// routines for structs, strings and interfaces.
package compare

import (
	"github.com/bir3/gocompiler/src/cmd/compile/internal/base"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/ir"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/typecheck"
	"github.com/bir3/gocompiler/src/cmd/compile/internal/types"
	"fmt"
	"math/bits"
	"sort"
)

// IsRegularMemory reports whether t can be compared/hashed as regular memory.
func IsRegularMemory(t *types.Type) bool {
	a, _ := types.AlgType(t)
	return a == types.AMEM
}
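
// For example (an illustration, not part of this package's API): int32,
// [8]byte, and structs made only of such fields are regular memory, so ==
// on them can compile to a bitwise compare. A string is not regular
// memory: two equal strings may point at different backing arrays, so
// comparing the string headers as raw memory would be wrong.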

// Memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
func Memrun(t *types.Type, start int) (size int64, next int) {
	next = start
	for {
		next++
		if next == t.NumFields() {
			break
		}
		// Stop run after a padded field.
		if types.IsPaddedField(t, next-1) {
			break
		}
		// Also, stop before a blank or non-memory field.
		if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
			break
		}
		// For issue 46283, don't combine fields if the resulting load would
		// require a larger alignment than the component fields.
		if base.Ctxt.Arch.Alignment > 1 {
			align := t.Alignment()
			if off := t.Field(start).Offset; off&(align-1) != 0 {
				// Offset is less aligned than the containing type.
				// Use offset to determine alignment.
				align = 1 << uint(bits.TrailingZeros64(uint64(off)))
			}
			size := t.Field(next).End() - t.Field(start).Offset
			if size > align {
				break
			}
		}
	}
	return t.Field(next-1).End() - t.Field(start).Offset, next
}
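
// As a hedged illustration (field offsets assume a typical 64-bit layout):
// for struct{ a, b, c int8; d string }, Memrun(t, 0) returns size=3 and
// next=3. The run covers a, b and c at offsets 0..2, then stops because c
// is followed by padding before d, and d is not regular memory anyway.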

// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
	switch t.Kind() {
	default:
		return false
	case types.TINTER:
		return true
	case types.TARRAY:
		return EqCanPanic(t.Elem())
	case types.TSTRUCT:
		for _, f := range t.FieldSlice() {
			if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
				return true
			}
		}
		return false
	}
}
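
// For example (illustration only): == on struct{ x int; y any } can panic,
// because comparing the interface field y panics at runtime if y holds an
// uncomparable dynamic type such as a slice. == on struct{ x int; s string }
// cannot panic.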

// EqStructCost returns the cost of an equality comparison of two structs.
//
// The cost is determined using an algorithm which takes into consideration
// the size of the registers in the current architecture and the size of the
// memory-only fields in the struct.
func EqStructCost(t *types.Type) int64 {
	cost := int64(0)

	for i, fields := 0, t.FieldSlice(); i < len(fields); {
		f := fields[i]

		// Skip blank-named fields.
		if f.Sym.IsBlank() {
			i++
			continue
		}

		n, _, next := eqStructFieldCost(t, i)

		cost += n
		i = next
	}

	return cost
}
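
// A hedged worked example, assuming 8-byte registers, mergeable loads, and
// no extra alignment constraint (e.g. amd64): for struct{ a, b, c int64 },
// the three fields form one 24-byte memory run, so EqStructCost returns
// 24/8 = 3 register compares.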

// eqStructFieldCost returns the cost of an equality comparison of two struct fields.
// t is the parent struct type, and i is the index of the field in the parent struct type.
// eqStructFieldCost may compute the cost of several adjacent fields at once. It returns
// the cost, the size of the set of fields it computed the cost for (in bytes), and the
// index of the first field not part of the set of fields for which the cost
// has already been calculated.
func eqStructFieldCost(t *types.Type, i int) (int64, int64, int) {
	var (
		cost    = int64(0)
		regSize = int64(types.RegSize)

		size int64
		next int
	)

	if base.Ctxt.Arch.CanMergeLoads {
		// If we can merge adjacent loads then we can calculate the cost of the
		// comparison using the size of the memory run and the size of the registers.
		size, next = Memrun(t, i)
		cost = size / regSize
		if size%regSize != 0 {
			cost++
		}
		return cost, size, next
	}

	// If we cannot merge adjacent loads then we have to use the size of the
	// field and take into account the type to determine how many loads and compares
	// are needed.
	ft := t.Field(i).Type
	size = ft.Size()
	next = i + 1

	return calculateCostForType(ft), size, next
}
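
// runCost is a standalone sketch (a hypothetical helper for illustration,
// not used by the compiler) of the merged-load arithmetic above: the cost
// is the run size divided by the register size, rounded up, so a 12-byte
// run with 8-byte registers costs 2 compares.
func runCost(size, regSize int64) int64 {
	cost := size / regSize // whole registers covered by the run
	if size%regSize != 0 {
		cost++ // one extra compare for the partial register at the end
	}
	return cost
}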

func calculateCostForType(t *types.Type) int64 {
	var cost int64
	switch t.Kind() {
	case types.TSTRUCT:
		return EqStructCost(t)
	case types.TSLICE:
		// Slices are not comparable.
		base.Fatalf("eqStructFieldCost: unexpected slice type")
	case types.TARRAY:
		elemCost := calculateCostForType(t.Elem())
		cost = t.NumElem() * elemCost
	case types.TSTRING, types.TINTER, types.TCOMPLEX64, types.TCOMPLEX128:
		cost = 2
	case types.TINT64, types.TUINT64:
		cost = 8 / int64(types.RegSize)
	default:
		cost = 1
	}
	return cost
}
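
// For example (illustration only): [4]int16 costs 4*1 = 4; a string or an
// interface costs 2 (length plus data, or type word plus data word); int64
// costs 8/RegSize, i.e. 1 on a 64-bit target and 2 on a 32-bit one.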

// EqStruct compares two structs np and nq for equality.
// It works by building a list of boolean conditions to satisfy.
// Conditions must be evaluated in the returned order and
// properly short-circuited by the caller.
func EqStruct(t *types.Type, np, nq ir.Node) []ir.Node {
	// The conditions are a list-of-lists. Conditions are reorderable
	// within each inner list. The outer lists must be evaluated in order.
	var conds [][]ir.Node
	conds = append(conds, []ir.Node{})
	and := func(n ir.Node) {
		i := len(conds) - 1
		conds[i] = append(conds[i], n)
	}

	// Walk the struct using memequal for runs of AMEM
	// and calling specific equality tests for the others.
	for i, fields := 0, t.FieldSlice(); i < len(fields); {
		f := fields[i]

		// Skip blank-named fields.
		if f.Sym.IsBlank() {
			i++
			continue
		}

		// Compare non-memory fields with field equality.
		if !IsRegularMemory(f.Type) {
			if EqCanPanic(f.Type) {
				// Enforce ordering by starting a new set of reorderable conditions.
				conds = append(conds, []ir.Node{})
			}
			p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
			q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
			switch {
			case f.Type.IsString():
				eqlen, eqmem := EqString(p, q)
				and(eqlen)
				and(eqmem)
			default:
				and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
			}
			if EqCanPanic(f.Type) {
				// Also enforce ordering after something that can panic.
				conds = append(conds, []ir.Node{})
			}
			i++
			continue
		}

		cost, size, next := eqStructFieldCost(t, i)
		if cost <= 4 {
			// Cost of 4 or less: use plain field equality.
			s := fields[i:next]
			for _, f := range s {
				and(eqfield(np, nq, ir.OEQ, f.Sym))
			}
		} else {
			// Higher cost: use memequal.
			cc := eqmem(np, nq, f.Sym, size)
			and(cc)
		}
		i = next
	}

	// Sort conditions to put runtime calls last.
	// Preserve the rest of the ordering.
	var flatConds []ir.Node
	for _, c := range conds {
		isCall := func(n ir.Node) bool {
			return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
		}
		sort.SliceStable(c, func(i, j int) bool {
			return !isCall(c[i]) && isCall(c[j])
		})
		flatConds = append(flatConds, c...)
	}
	return flatConds
}
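
// As a hedged illustration of the output's shape (not verbatim compiler
// IR): for struct{ a int64; s string; e error }, EqStruct returns
// conditions roughly equivalent to
//
//	p.a == q.a
//	len(p.s) == len(q.s)
//	memequal(p.s.ptr, q.s.ptr, len(p.s))
//	p.e == q.e
//
// The memequal call is sorted after the cheap comparisons within its
// reorderable group, and the interface comparison is fenced into its own
// group because it can panic.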

// EqString returns the nodes
//
//	len(s) == len(t)
//
// and
//
//	memequal(s.ptr, t.ptr, len(s))
//
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and short-circuiting is required.
func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
	s = typecheck.Conv(s, types.Types[types.TSTRING])
	t = typecheck.Conv(t, types.Types[types.TSTRING])
	sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
	tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
	slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
	tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])

	fn := typecheck.LookupRuntime("memequal")
	fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
	call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}, false).(*ir.CallExpr)

	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
	cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
	cmp.SetType(types.Types[types.TBOOL])
	return cmp, call
}
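
// Callers are expected to combine the two results with short-circuit
// evaluation, conceptually if eqlen && eqmem (illustration only), so that
// memequal is never called on strings of different lengths.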

// EqInterface returns the nodes
//
//	s.tab == t.tab (or s.typ == t.typ, as appropriate)
//
// and
//
//	ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
//
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and short-circuiting is required.
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
	if !types.Identical(s.Type(), t.Type()) {
		base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
	}
	// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
	// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
	var fn ir.Node
	if s.Type().IsEmptyInterface() {
		fn = typecheck.LookupRuntime("efaceeq")
	} else {
		fn = typecheck.LookupRuntime("ifaceeq")
	}

	stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
	ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
	sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
	tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
	sdata.SetType(types.Types[types.TUNSAFEPTR])
	tdata.SetType(types.Types[types.TUNSAFEPTR])
	sdata.SetTypecheck(1)
	tdata.SetTypecheck(1)

	call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)

	cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
	cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
	cmp.SetType(types.Types[types.TBOOL])
	return cmp, call
}
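
// As with EqString, callers must short-circuit: conceptually
// eqtab && eqdata (illustration only). The runtime ifaceeq/efaceeq
// routines compare the data words on the assumption that both operands
// already have the same dynamic type, which is why eqtab must be
// evaluated first.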

// eqfield returns the node
//
//	p.field == q.field
func eqfield(p ir.Node, q ir.Node, op ir.Op, field *types.Sym) ir.Node {
	nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
	ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
	ne := ir.NewBinaryExpr(base.Pos, op, nx, ny)
	return ne
}

// eqmem returns the node
//
//	memequal(&p.field, &q.field, size)
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
	nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
	ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))

	fn, needsize := eqmemfunc(size, nx.Type().Elem())
	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
	call.Args.Append(nx)
	call.Args.Append(ny)
	if needsize {
		call.Args.Append(ir.NewInt(size))
	}

	return call
}

func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
	switch size {
	default:
		fn = typecheck.LookupRuntime("memequal")
		needsize = true
	case 1, 2, 4, 8, 16:
		buf := fmt.Sprintf("memequal%d", int(size)*8)
		fn = typecheck.LookupRuntime(buf)
	}

	fn = typecheck.SubstArgTypes(fn, t, t)
	return fn, needsize
}
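
// For example (illustration only): a 16-byte field selects the fixed-size
// memequal128 variant and needsize is false, while a 24-byte field falls
// back to the generic memequal, which takes the size 24 as an explicit
// third argument.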