Source file src/cmd/compile/internal/walk/compare.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"encoding/binary"
	"fmt"
	"go/constant"
	"hash/fnv"
	"io"

	"cmd/compile/internal/base"
	"cmd/compile/internal/compare"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
)

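// fakePC returns a node holding a deterministic 32-bit hash of n's
// position and textual form, used below as the fake PC argument for the
// libfuzzer trace-compare hooks.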
func fakePC(n ir.Node) ir.Node {
	// In order to get deterministic IDs, we include the package path, absolute filename,
	// line number, and column number in the calculation of the fakePC for the IR node.
	hash := fnv.New32()
	// We ignore the errors here because the `io.Writer` in the `hash.Hash` interface never returns an error.
	io.WriteString(hash, base.Ctxt.Pkgpath)
	io.WriteString(hash, base.Ctxt.PosTable.Pos(n.Pos()).AbsFilename())
	binary.Write(hash, binary.LittleEndian, int64(n.Pos().Line()))
	binary.Write(hash, binary.LittleEndian, int64(n.Pos().Col()))
	// We also include the string representation of the node to distinguish
	// autogenerated expressions, since those get the same `src.XPos`.
	io.WriteString(hash, fmt.Sprintf("%v", n))

	return ir.NewInt(base.Pos, int64(hash.Sum32()))
}

// The result of walkCompare MUST be assigned back to n, e.g.
//
//	n.Left = walkCompare(n.Left, init)
func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
		return walkCompareInterface(n, init)
	}

	if n.X.Type().IsString() && n.Y.Type().IsString() {
		return walkCompareString(n, init)
	}

	n.X = walkExpr(n.X, init)
	n.Y = walkExpr(n.Y, init)

	// Given mixed interface/concrete comparison,
	// rewrite into types-equal && data-equal.
	// This is efficient, avoids allocations, and avoids runtime calls.
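	// For example, with i of empty interface type and x of concrete type T,
	// i == x becomes, roughly:
	//   i.type == type(T) && *(*T)(i.data) == x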
	//
	// TODO(mdempsky): It would be more general and probably overall
	// simpler to just extend walkCompareInterface to optimize when one
	// operand is an OCONVIFACE.
	if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
		// Preserve side-effects in case of short-circuiting; see #32187.
		l := cheapExpr(n.X, init)
		r := cheapExpr(n.Y, init)
		// Swap so that l is the interface value and r is the concrete value.
		if n.Y.Type().IsInterface() {
			l, r = r, l
		}

		// Handle both == and !=.
		eq := n.Op()
		andor := ir.OOROR
		if eq == ir.OEQ {
			andor = ir.OANDAND
		}
		// Check for types equal.
		// For empty interface, this is:
		//   l.tab == type(r)
		// For non-empty interface, this is:
		//   l.tab != nil && l.tab._type == type(r)
		//
		// TODO(mdempsky): For non-empty interface comparisons, just
		// compare against the itab address directly?
		var eqtype ir.Node
		tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
		rtyp := reflectdata.CompareRType(base.Pos, n)
		if l.Type().IsEmptyInterface() {
			tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
			tab.SetTypecheck(1)
			eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
		} else {
			nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
			match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
			eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
		}
		// Check for data equal.
		eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
		// Put it all together.
		expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
		return finishCompare(n, expr, init)
	}

	// Must be comparison of array or struct.
	// Otherwise back end handles it.
	// While we're here, decide whether to
	// inline or call an eq alg.
	t := n.X.Type()
	var inline bool

	maxcmpsize := int64(4)
	unalignedLoad := ssagen.Arch.LinkArch.CanMergeLoads
	if unalignedLoad {
		// Keep this low enough to generate less code than a function call.
		maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
	}

	switch t.Kind() {
	default:
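		// When libfuzzer instrumentation is enabled, report integer
		// comparison operands to the fuzzer via runtime hooks.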
		if base.Debug.Libfuzzer != 0 && t.IsInteger() && (n.X.Name() == nil || !n.X.Name().Libfuzzer8BitCounter()) {
			n.X = cheapExpr(n.X, init)
			n.Y = cheapExpr(n.Y, init)

			// If exactly one comparison operand is
			// constant, invoke the constcmp functions
			// instead, and arrange for the constant
			// operand to be the first argument.
			l, r := n.X, n.Y
			if r.Op() == ir.OLITERAL {
				l, r = r, l
			}
			constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL

			var fn string
			var paramType *types.Type
			switch t.Size() {
			case 1:
				fn = "libfuzzerTraceCmp1"
				if constcmp {
					fn = "libfuzzerTraceConstCmp1"
				}
				paramType = types.Types[types.TUINT8]
			case 2:
				fn = "libfuzzerTraceCmp2"
				if constcmp {
					fn = "libfuzzerTraceConstCmp2"
				}
				paramType = types.Types[types.TUINT16]
			case 4:
				fn = "libfuzzerTraceCmp4"
				if constcmp {
					fn = "libfuzzerTraceConstCmp4"
				}
				paramType = types.Types[types.TUINT32]
			case 8:
				fn = "libfuzzerTraceCmp8"
				if constcmp {
					fn = "libfuzzerTraceConstCmp8"
				}
				paramType = types.Types[types.TUINT64]
			default:
				base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
			}
			init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init), fakePC(n)))
		}
		return n
	case types.TARRAY:
		// We can compare several elements at once with 2/4/8 byte integer compares.
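		// For example, on architectures that can merge loads, a [4]byte
		// comparison can become, roughly, a single 32-bit compare.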
		inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Size()*t.NumElem() <= maxcmpsize))
	case types.TSTRUCT:
		inline = compare.EqStructCost(t) <= 4
	}

	cmpl := n.X
	for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
		cmpl = cmpl.(*ir.ConvExpr).X
	}
	cmpr := n.Y
	for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
		cmpr = cmpr.(*ir.ConvExpr).X
	}

	// We chose not to inline; call the equality function directly.
	if !inline {
		// eq algs take pointers; cmpl and cmpr must be addressable.
		if !ir.IsAddressable(cmpl) || !ir.IsAddressable(cmpr) {
			base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
		}

		// Should only arrive here with large memory or
		// a struct/array containing a non-memory field/element.
		// Small memory is handled inline, and single non-memory
		// is handled by walkCompare.
		fn, needsLength := reflectdata.EqFor(t)
		call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
		addrCmpl := typecheck.NodAddr(cmpl)
		addrCmpR := typecheck.NodAddr(cmpr)
		if !types.IsNoRacePkg(types.LocalPkg) && base.Flag.Race {
			ptrL := typecheck.Conv(typecheck.Conv(addrCmpl, types.Types[types.TUNSAFEPTR]), types.Types[types.TUINTPTR])
			ptrR := typecheck.Conv(typecheck.Conv(addrCmpR, types.Types[types.TUNSAFEPTR]), types.Types[types.TUINTPTR])
			raceFn := typecheck.LookupRuntime("racereadrange")
			size := ir.NewInt(base.Pos, t.Size())
			call.PtrInit().Append(mkcall1(raceFn, nil, init, ptrL, size))
			call.PtrInit().Append(mkcall1(raceFn, nil, init, ptrR, size))
		}
		call.Args.Append(addrCmpl)
		call.Args.Append(addrCmpR)
		if needsLength {
			call.Args.Append(ir.NewInt(base.Pos, t.Size()))
		}
		res := ir.Node(call)
		if n.Op() != ir.OEQ {
			res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
		}
		return finishCompare(n, res, init)
	}

	// inline: build a boolean expression comparing element by element
	andor := ir.OANDAND
	if n.Op() == ir.ONE {
		andor = ir.OOROR
	}
	var expr ir.Node
	comp := func(el, er ir.Node) {
		a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
		if expr == nil {
			expr = a
		} else {
			expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
		}
	}
	and := func(cond ir.Node) {
		if expr == nil {
			expr = cond
		} else {
			expr = ir.NewLogicalExpr(base.Pos, andor, expr, cond)
		}
	}
	cmpl = safeExpr(cmpl, init)
	cmpr = safeExpr(cmpr, init)
	if t.IsStruct() {
		conds, _ := compare.EqStruct(t, cmpl, cmpr)
		if n.Op() == ir.OEQ {
			for _, cond := range conds {
				and(cond)
			}
		} else {
			for _, cond := range conds {
				notCond := ir.NewUnaryExpr(base.Pos, ir.ONOT, cond)
				and(notCond)
			}
		}
	} else {
		step := int64(1)
		remains := t.NumElem() * t.Elem().Size()
		combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Size() <= 4 && t.Elem().IsInteger()
		combine32bit := unalignedLoad && t.Elem().Size() <= 2 && t.Elem().IsInteger()
		combine16bit := unalignedLoad && t.Elem().Size() == 1 && t.Elem().IsInteger()
		for i := int64(0); remains > 0; {
			var convType *types.Type
			switch {
			case remains >= 8 && combine64bit:
				convType = types.Types[types.TINT64]
				step = 8 / t.Elem().Size()
			case remains >= 4 && combine32bit:
				convType = types.Types[types.TUINT32]
				step = 4 / t.Elem().Size()
			case remains >= 2 && combine16bit:
				convType = types.Types[types.TUINT16]
				step = 2 / t.Elem().Size()
			default:
				step = 1
			}
			if step == 1 {
				comp(
					ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i)),
					ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i)),
				)
				i++
				remains -= t.Elem().Size()
			} else {
				elemType := t.Elem().ToUnsigned()
				cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i)))
				cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
				cmplw = typecheck.Conv(cmplw, convType) // widen
				cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i)))
				cmprw = typecheck.Conv(cmprw, elemType)
				cmprw = typecheck.Conv(cmprw, convType)
				// For code like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
				// ssa will generate a single large load.
				for offset := int64(1); offset < step; offset++ {
					lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i+offset)))
					lb = typecheck.Conv(lb, elemType)
					lb = typecheck.Conv(lb, convType)
					lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(base.Pos, 8*t.Elem().Size()*offset))
					cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
					rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i+offset)))
					rb = typecheck.Conv(rb, elemType)
					rb = typecheck.Conv(rb, convType)
					rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(base.Pos, 8*t.Elem().Size()*offset))
					cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
				}
				comp(cmplw, cmprw)
				i += step
				remains -= step * t.Elem().Size()
			}
		}
	}
	if expr == nil {
		expr = ir.NewBool(base.Pos, n.Op() == ir.OEQ)
		// We still need to use cmpl and cmpr, in case they contain
		// an expression which might panic. See issue 23837.
		a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.BlankNode, cmpl))
		a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.BlankNode, cmpr))
		init.Append(a1, a2)
	}
	return finishCompare(n, expr, init)
}

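// walkCompareInterface lowers an interface/interface comparison into a
// type-word comparison combined with a runtime data comparison.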
func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	swap := n.X.Op() != ir.OCONVIFACE && n.Y.Op() == ir.OCONVIFACE
	n.Y = cheapExpr(n.Y, init)
	n.X = cheapExpr(n.X, init)
	if swap {
		// Put the concrete type first in the comparison.
		// This passes a constant type (itab) to efaceeq (ifaceeq),
		// which is easier to match against in rewrite rules.
		// See issue 70738.
		n.X, n.Y = n.Y, n.X
	}

	eqtab, eqdata := compare.EqInterface(n.X, n.Y)
	var cmp ir.Node
	if n.Op() == ir.OEQ {
		cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
	} else {
		eqtab.SetOp(ir.ONE)
		cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
	}
	return finishCompare(n, cmp, init)
}

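// walkCompareString lowers string comparisons. Comparisons against short
// constant strings become length checks plus byte-wise (or word-wise)
// compares; other == and != comparisons use a length check plus memequal;
// ordered comparisons call the runtime's cmpstring.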
func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	if base.Debug.Libfuzzer != 0 {
		if !ir.IsConst(n.X, constant.String) || !ir.IsConst(n.Y, constant.String) {
			fn := "libfuzzerHookStrCmp"
			n.X = cheapExpr(n.X, init)
			n.Y = cheapExpr(n.Y, init)
			paramType := types.Types[types.TSTRING]
			init.Append(mkcall(fn, nil, init, tracecmpArg(n.X, paramType, init), tracecmpArg(n.Y, paramType, init), fakePC(n)))
		}
	}
	// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
	var cs, ncs ir.Node // const string, non-const string
	switch {
	case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
		// ignore; will be constant evaluated
	case ir.IsConst(n.X, constant.String):
		cs = n.X
		ncs = n.Y
	case ir.IsConst(n.Y, constant.String):
		cs = n.Y
		ncs = n.X
	}
	if cs != nil {
		cmp := n.Op()
		// Our comparison below assumes that the non-constant string
		// is on the left-hand side, so rewrite "" cmp x to x cmp "".
		// See issue 24817.
		if ir.IsConst(n.X, constant.String) {
			cmp = brrev(cmp)
		}

		// maxRewriteLen was chosen empirically.
		// It is the value that minimizes cmd/go file size
		// across most architectures.
		// See the commit description for CL 26758 for details.
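		// For example (roughly, without merged loads):
		//   s == "ab"  becomes  len(s) == 2 && s[0] == 'a' && s[1] == 'b'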
		maxRewriteLen := 6
		// Some architectures can load an unaligned byte sequence as one word,
		// so we can cover longer strings with the same amount of code.
		canCombineLoads := ssagen.Arch.LinkArch.CanMergeLoads
		combine64bit := false
		if canCombineLoads {
			// Keep this low enough to generate less code than a function call.
			maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
			combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
		}

		var and ir.Op
		switch cmp {
		case ir.OEQ:
			and = ir.OANDAND
		case ir.ONE:
			and = ir.OOROR
		default:
			// Don't do byte-wise comparisons for <, <=, etc.
			// They're fairly complicated.
			// Length-only checks are ok, though.
			maxRewriteLen = 0
		}
		if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
			if len(s) > 0 {
				ncs = safeExpr(ncs, init)
			}
			r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(base.Pos, int64(len(s)))))
			remains := len(s)
			for i := 0; remains > 0; {
				if remains == 1 || !canCombineLoads {
					cb := ir.NewInt(base.Pos, int64(s[i]))
					ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i)))
					r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
					remains--
					i++
					continue
				}
				var step int
				var convType *types.Type
				switch {
				case remains >= 8 && combine64bit:
					convType = types.Types[types.TINT64]
					step = 8
				case remains >= 4:
					convType = types.Types[types.TUINT32]
					step = 4
				case remains >= 2:
					convType = types.Types[types.TUINT16]
					step = 2
				}
				ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i))), convType)
				csubstr := int64(s[i])
				// Calculate the large constant from bytes as a sequence of shifts and ors.
				// Like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
				// ssa will combine this into a single large load.
				for offset := 1; offset < step; offset++ {
					b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i+offset))), convType)
					b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(base.Pos, int64(8*offset)))
					ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
					csubstr |= int64(s[i+offset]) << uint8(8*offset)
				}
				csubstrPart := ir.NewInt(base.Pos, csubstr)
				// Compare "step" bytes at once.
				r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
				remains -= step
				i += step
			}
			return finishCompare(n, r, init)
		}
	}

	var r ir.Node
	if n.Op() == ir.OEQ || n.Op() == ir.ONE {
		// prepare for rewrite below
		n.X = cheapExpr(n.X, init)
		n.Y = cheapExpr(n.Y, init)
		eqlen, eqmem := compare.EqString(n.X, n.Y)
		// Quick check of len before full compare for == or !=.
		// memequal then tests equality up to length len.
		if n.Op() == ir.OEQ {
			// len(left) == len(right) && memequal(left, right, len)
			r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
		} else {
			// len(left) != len(right) || !memequal(left, right, len)
			eqlen.SetOp(ir.ONE)
			r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
		}
	} else {
		// Compare the result of runtime cmpstring(s1, s2) against 0.
		r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
		r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(base.Pos, 0))
	}

	return finishCompare(n, r, init)
}

// The result of finishCompare MUST be assigned back to n, e.g.
//
//	n.Left = finishCompare(n.Left, r, init)
func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
	r = typecheck.Expr(r)
	r = typecheck.Conv(r, n.Type())
	r = walkExpr(r, init)
	return r
}

// brcom returns !(op).
// For example, brcom(==) is !=.
func brcom(op ir.Op) ir.Op {
	switch op {
	case ir.OEQ:
		return ir.ONE
	case ir.ONE:
		return ir.OEQ
	case ir.OLT:
		return ir.OGE
	case ir.OGT:
		return ir.OLE
	case ir.OLE:
		return ir.OGT
	case ir.OGE:
		return ir.OLT
	}
	base.Fatalf("brcom: no com for %v\n", op)
	return op
}

// brrev returns reverse(op).
// For example, brrev(<) is >.
func brrev(op ir.Op) ir.Op {
	switch op {
	case ir.OEQ:
		return ir.OEQ
	case ir.ONE:
		return ir.ONE
	case ir.OLT:
		return ir.OGT
	case ir.OGT:
		return ir.OLT
	case ir.OLE:
		return ir.OGE
	case ir.OGE:
		return ir.OLE
	}
	base.Fatalf("brrev: no rev for %v\n", op)
	return op
}

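// tracecmpArg returns n, converted to type t, for use as a libfuzzer
// trace-compare argument.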
func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
	// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
	if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
		n = copyExpr(n, n.Type(), init)
	}

	return typecheck.Conv(n, t)
}
