// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
(AddPtr ...) => (ADDQ ...)
(Add(32|64)F ...) => (ADDS(S|D) ...)

(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
(SubPtr ...) => (SUBQ ...)
(Sub(32|64)F ...) => (SUBS(S|D) ...)

(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
(Mul(32|64)F ...) => (MULS(S|D) ...)

(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))

(Hmul(64|32) ...) => (HMUL(Q|L) ...)
(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)

(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
(Div8  x y) => (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Div(32|64)F ...) => (DIVS(S|D) ...)

(Select0 (Add64carry x y c)) =>
	(Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Add64carry x y c)) =>
	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
(Select0 (Sub64borrow x y c)) =>
	(Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Sub64borrow x y c)) =>
	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
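// Note: NEGLflags materializes the incoming carry/borrow bit c (0 or 1) into
// the flags: NEG sets the carry flag iff its operand is nonzero, so CF == c
// afterwards. On the way out, SBBQcarrymask turns CF back into 0/-1 and NEGQ
// turns that into the 0/1 carry result.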

// Optimize ADCQ and friends
(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x


(Mul64uhilo ...) => (MULQU2 ...)
(Div128u ...) => (DIVQU2 ...)

(Avg64u ...) => (AVGQU ...)

(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
(Mod8  x y) => (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)

(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
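// Float negation is just a sign-bit flip: -0.0 has only the sign bit set, so
// XORing with it toggles the sign and leaves every other bit alone.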

// Lowering boolean ops
(AndB ...) => (ANDL ...)
(OrB ...) => (ORL ...)
(Not x) => (XORLconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)

// Lowering other arithmetic
(Ctz64 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64 <t> x) && buildcfg.GOAMD64 <  3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x)     && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [1<<16] x))
(Ctz8  x) => (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))

(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz8NonZero  x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64NonZero x) && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ x))
(Ctz32NonZero x) && buildcfg.GOAMD64 <  3 => (BSFL x)
(Ctz16NonZero x) && buildcfg.GOAMD64 <  3 => (BSFL x)
(Ctz8NonZero  x) && buildcfg.GOAMD64 <  3 => (BSFL x)

// BitLen64 of a 64-bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
// However, for zero-extended values, we can cheat a bit and calculate
// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
// places the index of the highest set bit where we want it.
// For GOAMD64>=3, BitLen can be calculated as OperandSize - LZCNT(x).
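// For example, with x = 5 (0b101, BitLen 3): BSR(5<<1 + 1) = BSR(0b1011) = 3,
// and with GOAMD64>=3, 64 - LZCNT(5) = 64 - 61 = 3.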
(BitLen64 <t> x) && buildcfg.GOAMD64 < 3 => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
(BitLen32 x) && buildcfg.GOAMD64 <  3 => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
(BitLen16 x) && buildcfg.GOAMD64 <  3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
(BitLen8  x) && buildcfg.GOAMD64 <  3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
(BitLen64 <t> x)        && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
// Use the 64-bit version so that constant folding can remove the unnecessary arithmetic.
(BitLen32 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
(BitLen16 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
(BitLen8 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))

(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
(Bswap16 x) => (ROLWconst [8] x)
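// Bswap16 works because swapping the two bytes of a 16-bit value is exactly a rotate by 8.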

(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))

(Sqrt ...) => (SQRTSD ...)
(Sqrt32 ...) => (SQRTSS ...)

(RoundToEven x) => (ROUNDSD [0] x)
(Floor x)       => (ROUNDSD [1] x)
(Ceil x)        => (ROUNDSD [2] x)
(Trunc x)       => (ROUNDSD [3] x)

(FMA x y z) => (VFMADD231SD z x y)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  ...) => (MOVBQSX ...)
(SignExt8to32  ...) => (MOVBQSX ...)
(SignExt8to64  ...) => (MOVBQSX ...)
(SignExt16to32 ...) => (MOVWQSX ...)
(SignExt16to64 ...) => (MOVWQSX ...)
(SignExt32to64 ...) => (MOVLQSX ...)

(ZeroExt8to16  ...) => (MOVBQZX ...)
(ZeroExt8to32  ...) => (MOVBQZX ...)
(ZeroExt8to64  ...) => (MOVBQZX ...)
(ZeroExt16to32 ...) => (MOVWQZX ...)
(ZeroExt16to64 ...) => (MOVWQZX ...)
(ZeroExt32to64 ...) => (MOVLQZX ...)

(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
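// Slicemask(x) is 0 when x == 0 and all ones otherwise: NEGQ x is negative
// for any nonzero length, and SARQconst [63] broadcasts its sign bit.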

(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8  ...) => (Copy ...)
(Trunc32to8  ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8  ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Lowering float <-> int
(Cvt32to32F ...) => (CVTSL2SS ...)
(Cvt32to64F ...) => (CVTSL2SD ...)
(Cvt64to32F ...) => (CVTSQ2SS ...)
(Cvt64to64F ...) => (CVTSQ2SD ...)

(Cvt32Fto32 ...) => (CVTTSS2SL ...)
(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
(Cvt64Fto32 ...) => (CVTTSD2SL ...)
(Cvt64Fto64 ...) => (CVTTSD2SQ ...)

(Cvt32Fto64F ...) => (CVTSS2SD ...)
(Cvt64Fto32F ...) => (CVTSD2SS ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// Floating-point min is tricky, as the hardware op isn't right for various special
// cases (-0 and NaN). We use two hardware ops organized just right to make the
// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207
// (although that comment isn't exactly right, as the value overwritten is not simulated correctly).
//    t1 = MINSD x, y   => incorrect if x==NaN or x==-0,y==+0
//    t2 = MINSD t1, x  => fixes x==NaN case
//   res = POR t1, t2   => fixes x==-0,y==+0 case
// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although
// it might not produce the same NaN as the input).
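// For example, with x = -0.0, y = +0.0 (in the operand convention used above,
// MINSD returns its second operand when the inputs compare equal or unordered):
//    t1 = MINSD(-0, +0) = +0      (wrong: Go wants -0)
//    t2 = MINSD(+0, -0) = -0
//   res = POR(+0, -0)   = -0      (ORing the bit patterns restores the sign bit)
// And with x = NaN: t1 = y, t2 = MINSD(y, NaN) = NaN, res = POR(y, NaN) = a NaN.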
(Min(64|32)F <t> x y) => (POR (MINS(D|S) <t> (MINS(D|S) <t> x y) x) (MINS(D|S) <t> x y))
// Floating-point max is even trickier. Punt to using min instead.
// max(x,y) == -min(-x,-y)
(Max(64|32)F <t> x y) => (Neg(64|32)F <t> (Min(64|32)F <t> (Neg(64|32)F <t> x) (Neg(64|32)F <t> y)))

(CvtBoolToUint8 ...) => (Copy ...)

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
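// The mask comes straight from the flags: CMP(Q|L|W|B)const y [k] sets the
// carry flag iff y < k (unsigned), and SBBcarrymask turns that into -1 (shift
// amount in range) or 0 (too large), so the AND keeps or zeroes the result.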
(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Lsh8x(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))

(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
(Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SHLL x y)

(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
(Rsh8Ux(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))

(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SHRB x y)

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift amount to -1 (all ones) when it is >= width.
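// For example, for Rsh64x64 with y >= 64: CMPQconst y [64] clears the carry
// flag, so SBBQcarrymask gives 0, NOTQ gives -1, and ORQ y -1 = -1. SARQ then
// masks the shift amount down to 63, producing the desired 0/-1 result.
// For y < 64 the mask is zero and the OR leaves y unchanged.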
(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
(Rsh8x(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))

(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
(Rsh8x(64|32|16|8) x y)  && shiftIsBounded(v) => (SARB x y)

// Lowering integer comparisons
(Less(64|32|16|8)      x y) => (SETL  (CMP(Q|L|W|B)     x y))
(Less(64|32|16|8)U     x y) => (SETB  (CMP(Q|L|W|B)     x y))
(Leq(64|32|16|8)       x y) => (SETLE (CMP(Q|L|W|B)     x y))
(Leq(64|32|16|8)U      x y) => (SETBE (CMP(Q|L|W|B)     x y))
(Eq(Ptr|64|32|16|8|B)  x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))

// Lowering floating point comparisons
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here
// and the operands are reversed when generating assembly language.
(Eq(32|64)F   x y) => (SETEQF (UCOMIS(S|D) x y))
(Neq(32|64)F  x y) => (SETNEF (UCOMIS(S|D) x y))
// Use SETGF/SETGEF with reversed operands to dodge NaN case.
(Less(32|64)F x y) => (SETGF  (UCOMIS(S|D) y x))
(Leq(32|64)F  x y) => (SETGEF (UCOMIS(S|D) y x))

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)

// Lowering stores
(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (MOVSDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (MOVSSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVQstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)

// Lowering moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) => (MOVOstore dst (MOVOload src mem) mem)

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [6] dst src mem) =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [7] dst src mem) =>
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [10] dst src mem) =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [11] dst src mem) =>
	(MOVLstore [7] dst (MOVLload [7] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [12] dst src mem) =>
	(MOVLstore [8] dst (MOVLload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem) && s >= 13 && s <= 15 =>
	(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
		(MOVQstore dst (MOVQload src mem) mem))

// Copying up to 192 bytes uses straightline code.
(Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)

// Copying up to ~1KB uses a small loop.
(Move [s] dst src mem) && s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)

// Large copying uses REP MOVSQ.
(Move [s] dst src mem) && s > repMoveThreshold && s%8 != 0 =>
	(Move [s-s%8]
		(OffPtr <dst.Type> dst [s%8])
		(OffPtr <src.Type> src [s%8])
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem) && s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s) =>
	(REPMOVSQ dst src (MOVQconst [s/8]) mem)

// Lowering Zero instructions
(Zero [0] _ mem) => mem
(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)

(Zero [3] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [5] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [6] destptr mem) =>
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [7] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))

// Zero small numbers of words directly.
(Zero [9] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [10] destptr mem) =>
	(MOVWstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [11] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,7)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [12] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [s] destptr mem) && s > 12 && s < 16 =>
	(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

// Zeroing up to 192 bytes uses straightline code.
(Zero [s] destptr mem) && s >= 16 && s < 192 => (LoweredZero [s] destptr mem)

// Zeroing up to ~1KB uses a small loop.
(Zero [s] destptr mem) && s >= 192 && s <= repZeroThreshold => (LoweredZeroLoop [s] destptr mem)

// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem) && s > repZeroThreshold && s%8 != 0 =>
	(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [s] destptr mem) && s > repZeroThreshold && s%8 == 0 =>
	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)

// Lowering constants
(Const8   [c]) => (MOVLconst [int32(c)])
(Const16  [c]) => (MOVLconst [int32(c)])
(Const32  ...) => (MOVLconst ...)
(Const64  ...) => (MOVQconst ...)
(Const32F ...) => (MOVSSconst ...)
(Const64F ...) => (MOVSDconst ...)
(ConstNil    ) => (MOVQconst [0])
(ConstBool [c]) => (MOVLconst [b2i32(c)])

// Lowering calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Lowering conditional moves
// If the condition is a SETxx, we can just run a CMOV from the comparison that was
// setting the flags.
// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
    => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
    => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
    => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)

// If the condition does not set the flags, we need to generate a comparison.
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
    => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
    => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
    => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))

(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
    => (CMOVQNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
    => (CMOVLNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
    => (CMOVWNE y x (CMPQconst [0] check))

// Absorb InvertFlags
(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
    => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
    => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
    => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)

// Absorb constants generated during lower
(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y

// Miscellaneous
(IsNonNil p) => (SETNE (TESTQ p p))
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // Only lower in the old ABI; in the new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)

(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
(Addr {sym} base) => (LEAQ {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (LEAQ {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (LEAQ {sym} base)

(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)

// block rewrites
(If (SETL  cmp) yes no) => (LT  cmp yes no)
(If (SETLE cmp) yes no) => (LE  cmp yes no)
(If (SETG  cmp) yes no) => (GT  cmp yes no)
(If (SETGE cmp) yes no) => (GE  cmp yes no)
(If (SETEQ cmp) yes no) => (EQ  cmp yes no)
(If (SETNE cmp) yes no) => (NE  cmp yes no)
(If (SETB  cmp) yes no) => (ULT cmp yes no)
(If (SETBE cmp) yes no) => (ULE cmp yes no)
(If (SETA  cmp) yes no) => (UGT cmp yes no)
(If (SETAE cmp) yes no) => (UGE cmp yes no)
(If (SETO cmp) yes no) => (OS cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF  cmp) yes no) => (UGT  cmp yes no)
(If (SETGEF cmp) yes no) => (UGE  cmp yes no)
(If (SETEQF cmp) yes no) => (EQF  cmp yes no)
(If (SETNEF cmp) yes no) => (NEF  cmp yes no)

(If cond yes no) => (NE (TESTB cond cond) yes no)
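// TESTB cond cond ANDs the boolean with itself, so ZF is set exactly when
// cond is false; branching on NE therefore branches on the boolean itself.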

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// Atomic loads.  Other than preserving their ordering with respect to other loads, nothing special here.
(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)

// Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))

// Atomic exchanges.
(AtomicExchange8 ptr val mem) => (XCHGB val ptr mem)
(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
(Select1     (AddTupleFirst32   _ tuple)) => (Select1 tuple)
(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
(Select1     (AddTupleFirst64   _ tuple)) => (Select1 tuple)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)

// Atomic memory logical operations (old style).
(AtomicAnd8  ptr val mem) => (ANDBlock ptr val mem)
(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
(AtomicOr8   ptr val mem) => (ORBlock  ptr val mem)
(AtomicOr32  ptr val mem) => (ORLlock  ptr val mem)

// Atomic memory logical operations (new style).
(Atomic(And64|And32|Or64|Or32)value ptr val mem) => (LoweredAtomic(And64|And32|Or64|Or32) ptr val mem)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVQconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVQconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVQconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVQconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)

// lowering rotates
(RotateLeft8  ...) => (ROLB ...)
(RotateLeft16 ...) => (ROLW ...)
(RotateLeft32 ...) => (ROLL ...)
(RotateLeft64 ...) => (ROLQ ...)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold boolean tests into blocks
(NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) => (LT  cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE  cmp yes no)
(NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) => (GT  cmp yes no)
(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE  cmp yes no)
(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ  cmp yes no)
(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE  cmp yes no)
(NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) => (ULT cmp yes no)
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
(NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) => (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)

// Unsigned comparisons to 0/1
(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])

// x & 1 != 0 -> x & 1
(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
// x & 1 == 0 -> (x & 1) ^ 1
(SETAE (BT(L|Q)const [0] x)) => (XORLconst [1] (ANDLconst <typ.Bool> [1] x))

// Shorten compare by rewriting x < 128 as x <= 127, which can be encoded in a single-byte immediate on x86.
(SETL c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETLE (CMP(Q|L)const [127] x))
(SETB c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETBE (CMP(Q|L)const [127] x))

// x >= 128 -> x > 127
(SETGE c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETG (CMP(Q|L)const [127] x))
(SETAE c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETA (CMP(Q|L)const [127] x))

(CMOVQLT x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVQLE x y (CMP(Q|L)const [127] z))
(CMOVLLT x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVLLE x y (CMP(Q|L)const [127] z))
(LT          c:(CMP(Q|L)const [128] z) yes no) && c.Uses == 1 => (LE (CMP(Q|L)const [127] z) yes no)
(CMOVQGE x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVQGT x y (CMP(Q|L)const [127] z))
(CMOVLGE x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVLGT x y (CMP(Q|L)const [127] z))
(GE          c:(CMP(Q|L)const [128] z) yes no) && c.Uses == 1 => (GT (CMP(Q|L)const [127] z)  yes no)

// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
// Note that BTx instructions use the carry bit, so we need to convert tests for zero flag
// into tests for carry flags.
// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
// mutandis, for UGE and SETAE, and CC and SETCC.
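// For example, y & (1<<x) != 0 becomes (ULT (BTQ x y)) (or (SETB (BTQ x y))
// in value form): BT copies bit x of y into the carry flag, which ULT/SETB test.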
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
((NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
    => ((ULT|UGE) (BTLconst [int8(log32u(uint32(c)))] x))
((NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
    => ((ULT|UGE) (BTQconst [int8(log32u(uint32(c)))] x))
((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
    => ((ULT|UGE) (BTQconst [int8(log64u(uint64(c)))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE)  (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE)  (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
    => (SET(B|AE)  (BTLconst [int8(log32u(uint32(c)))] x))
(SET(NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
    => (SET(B|AE)  (BTQconst [int8(log32u(uint32(c)))] x))
(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
    => (SET(B|AE)  (BTQconst [int8(log64u(uint64(c)))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
    => (SET(B|AE)store  [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
    => (SET(B|AE)store  [off] {sym} ptr (BTQ x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUnsignedPowerOfTwo(uint32(c))
    => (SET(B|AE)store  [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUnsignedPowerOfTwo(uint64(c))
    => (SET(B|AE)store  [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUnsignedPowerOfTwo(uint64(c))
    => (SET(B|AE)store  [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
// Handle bit-testing in the form (a>>b)&1 != 0 by building on the above rules
// and further combining shifts.
(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
(BT(Q|L)const [c] (ADDQ x x)) && c>1  => (BT(Q|L)const [c-1] x)
(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d      => (BT(Q|L)const [c-d] x)
(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
(BTLconst [c] (ADDL x x)) && c>1 => (BTLconst [c-1] x)
(BTLconst [c] (SHLLconst [d] x)) && c>d      => (BTLconst [c-d] x)
(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x)

// Rewrite a & 1 != 1 into a & 1 == 0.
// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)

// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
// the constant field of the OR/XOR instruction. See issue 61694.
((OR|XOR)Q (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64u(uint64(c)))] x)
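// For example, x | (1<<40): the mask doesn't fit in a 32-bit immediate, so
// rather than materializing it in a register we emit (BTSQconst [40] x).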

// Recognize bit clearing: a &^= 1<<b
(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
// Note: only convert AND to BTR if the constant wouldn't fit in
// the constant field of the AND instruction. See issue 61694.
(ANDQ (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64u(uint64(^c)))] x)

// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
// for instance:
//    x & 0xFFFF0000 -> (x >> 16) << 16
//    x & 0x80000000 -> (x >> 31) << 31
//
// In case the mask is just one bit (like the second example above), it conflicts
// with the rules above that detect bit-testing / bit-clearing of the first/last bit.
// We thus special-case them, by detecting the shift patterns.

// Special case resetting first/last bit
(ADD(L|Q) (SHR(L|Q)const [1] x) (SHR(L|Q)const [1] x))
	=> (AND(L|Q)const [-2] x)
(SHRLconst [1] (ADDL x x))
	=> (ANDLconst [0x7fffffff] x)
(SHRQconst [1] (ADDQ x x))
	=> (BTRQconst [63] x)
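// These follow from simple bit algebra: (x>>1)+(x>>1) == (x>>1)<<1 == x&^1,
// and the low bit of x+x is always 0, so shifting it right by 1 just drops
// the top bit of x.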

// Special case testing first/last bit (with double-shift generated by generic.rules)
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)

((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE)  (BTQconst [0] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE)  (BTLconst [0] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)

// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
// Fold combinations of bit ops on the same bit. An example is math.Copysign(c, -1).
(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x)
(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x)
(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x)
(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x)

// Fold boolean negation into SETcc.
(XORLconst [1] (SETNE x)) => (SETEQ x)
(XORLconst [1] (SETEQ x)) => (SETNE x)
(XORLconst [1] (SETL  x)) => (SETGE x)
(XORLconst [1] (SETGE x)) => (SETL  x)
(XORLconst [1] (SETLE x)) => (SETG  x)
(XORLconst [1] (SETG  x)) => (SETLE x)
(XORLconst [1] (SETB  x)) => (SETAE x)
(XORLconst [1] (SETAE x)) => (SETB  x)
(XORLconst [1] (SETBE x)) => (SETA  x)
(XORLconst [1] (SETA  x)) => (SETBE x)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) => (UGT  cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE  cmp yes no)
(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF  cmp yes no)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF  cmp yes no)

// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))

// fold constants into instructions
(ADDQ x (MOVQconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x)
(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)

(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))

(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
(MULL x (MOVLconst [c])) => (MULLconst [c] x)

(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)

(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
(OR(L|Q)const  [c] (OR(L|Q)const  [d] x)) => (OR(L|Q)const  [c | d] x)

(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)

(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
(ORQ x (MOVLconst [c])) => (ORQconst [c] x)
(ORL x (MOVLconst [c])) => (ORLconst [c] x)

(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
(XORL x (MOVLconst [c])) => (XORLconst [c] x)

(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)

(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])

(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)

// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))

((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0  => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0  => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))

((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))

((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0  => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0  => ((SHLL|SHRL|SARL) x (NEGL <t> y))
((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))

// rotate left negative = rotate right
(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
(ROLL x (NEG(Q|L) y)) => (RORL x y)
(ROLW x (NEG(Q|L) y)) => (RORW x y)
(ROLB x (NEG(Q|L) y)) => (RORB x y)

// rotate right negative = rotate left
(RORQ x (NEG(Q|L) y)) => (ROLQ x y)
(RORL x (NEG(Q|L) y)) => (ROLL x y)
(RORW x (NEG(Q|L) y)) => (ROLW x y)
(RORB x (NEG(Q|L) y)) => (ROLB x y)

// rotate by constants
(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)

(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)

// Constant shift simplifications
((SHLQ|SHRQ|SARQ)const      x [0]) => x
((SHLL|SHRL|SARL)const      x [0]) => x
((SHRW|SARW)const           x [0]) => x
((SHRB|SARB)const           x [0]) => x
((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x

// Multi-register shifts
(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
(ORQ (SH(R|L)XQ lo bits) (SH(L|R)XQ hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
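// SHRDQ lo hi bits shifts lo right by bits while shifting in the low bits of
// hi from the top (SHLDQ is the mirror image) - the same 64-bit window into a
// 128-bit double-width value that the ORQ pattern above builds by hand.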

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHRW x (MOVLconst [24]))), but just in case.

(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))

// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))

// Using MOVZX instead of AND is cheaper.
(AND(Q|L)const [  0xFF] x) => (MOVBQZX x)
(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
// It also can't trigger, because the is32Bit guard on the ANDQ lowering rule above
// prevents 0xFFFFFFFF from matching in the first place.
// Using an alternate form of this rule segfaults some binaries because of
// adverse interactions with other passes.
// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)

// strength reduction
(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
(MUL(Q|L)const [ 1] x) => x
(MULQconst [c] x) && canMulStrengthReduce(config, int64(c)) => {mulStrengthReduce(v, x, int64(c))}
(MULLconst [c] x) && v.Type.Size() <= 4 && canMulStrengthReduce32(config, c) => {mulStrengthReduce32(v, x, c)}

// Prefer addition when shifting left by one
(SHL(Q|L)const [1] x) => (ADD(Q|L) x x)

// combine add/shift into LEAQ/LEAL
(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
(ADD(L|Q) x (ADD(L|Q) y y))        => (LEA(L|Q)2 x y)
(ADD(L|Q) x (ADD(L|Q) x y))        => (LEA(L|Q)2 y x)
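// For example, ADDQ x (SHLQconst [3] y) computes x + 8*y, which LEAQ8 x y
// does in a single instruction without clobbering either input.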
   856  
   857  // combine ADDQ/ADDQconst into LEAQ1/LEAL1
   858  (ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
   859  (ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
   860  (ADD(Q|L)const [c] (ADD(Q|L) x x)) => (LEA(Q|L)1 [c] x x)
   861  
   862  // fold ADDQ/ADDL into LEAQ/LEAL
   863  (ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
   864  (LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
   865  (LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
   866  (ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
   867  
   868  // fold ADDQconst/ADDLconst into LEAQx/LEALx
   869  (ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
   870  (ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
   871  (ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
   872  (ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
   873  (LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
   874  (LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
   875  (LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
   876  (LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
   877  (LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
   878  (LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
   879  (LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)
   880  
   881  // fold shifts into LEAQx/LEALx
   882  (LEA(Q|L)1 [c] {s} x z:(ADD(Q|L) y y)) && x != z => (LEA(Q|L)2 [c] {s} x y)
   883  (LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
   884  (LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
   885  (LEA(Q|L)2 [c] {s} x z:(ADD(Q|L) y y)) && x != z => (LEA(Q|L)4 [c] {s} x y)
   886  (LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
   887  (LEA(Q|L)4 [c] {s} x z:(ADD(Q|L) y y)) && x != z => (LEA(Q|L)8 [c] {s} x y)
   888  
   889  // (x + x) << 1 -> x << 2
   890  (LEA(Q|L)2 [0] {s} (ADD(Q|L) x x) x) && s == nil => (SHL(Q|L)const [2] x)
   891  
   892  // (x + x) << 2 -> x << 3 and similar
   893  (SHL(Q|L)const [c] (ADD(Q|L) x x))  => (SHL(Q|L)const [c+1] x)
   894  
   895  // reverse ordering of compare instruction
   896  (SETL (InvertFlags x)) => (SETG x)
   897  (SETG (InvertFlags x)) => (SETL x)
   898  (SETB (InvertFlags x)) => (SETA x)
   899  (SETA (InvertFlags x)) => (SETB x)
   900  (SETLE (InvertFlags x)) => (SETGE x)
   901  (SETGE (InvertFlags x)) => (SETLE x)
   902  (SETBE (InvertFlags x)) => (SETAE x)
   903  (SETAE (InvertFlags x)) => (SETBE x)
   904  (SETEQ (InvertFlags x)) => (SETEQ x)
   905  (SETNE (InvertFlags x)) => (SETNE x)
   906  
   907  (SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
   908  (SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
   909  (SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
   910  (SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
   911  (SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
   912  (SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
   913  (SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
   914  (SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
   915  (SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
   916  (SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)
   917  
   918  // sign extended loads
   919  // Note: The combined instruction must end up in the same block
   920  // as the original load. If not, we end up making a value with
   921  // memory type live in two different blocks, which can lead to
   922  // multiple memory values alive simultaneously.
   923  // Make sure we don't combine these ops if the load has another use.
   924  // This prevents a single load from being split into multiple loads
   925  // which then might return different values.  See test/atomicload.go.
   926  (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   927  (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   928  (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   929  (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   930  (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   931  (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   932  (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   933  (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   934  (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   935  (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   936  (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   937  (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   938  (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   939  (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   940  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   941  (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   942  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   943  (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   944  
   945  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
   946  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
   947  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
   948  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
   949  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
   950  (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
   951  (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
   952  (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
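        // For example, a MOVBload that follows a MOVBstore to the same
        // address sees exactly the low 8 bits that were stored, so it can
        // become a zero extension of the stored value.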
   953  
   954  // Fold extensions and ANDs together.
   955  (MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
   956  (MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
   957  (MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
   958  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
   959  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
   960  (MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
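        // For example, (MOVBQZX (ANDLconst [0x1ff] x)) keeps only the low
        // byte of the masked value, which is just (ANDLconst [0xff] x).
        // The signed cases additionally require the result's sign bit to be
        // clear, so that sign extension is a no-op too.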
   961  
   962  // Don't extend before storing
   963  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
   964  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
   965  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
   966  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
   967  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
   968  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
   969  
   970  // fold constants into memory operations
   971  // Note that this is not always a good idea because if not all the uses of
   972  // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
   973  // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
   974  // Nevertheless, let's do it!
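        // For example, (MOVQload [8] {sym} (ADDQconst [16] ptr) mem) becomes
        // (MOVQload [24] {sym} ptr mem): the addressing mode absorbs the
        // add, provided the combined offset still fits in 32 bits.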
   975  (MOV(Q|L|W|B|SS|SD|O)load  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
   976      (MOV(Q|L|W|B|SS|SD|O)load  [off1+off2] {sym} ptr mem)
   977  (MOV(Q|L|W|B|SS|SD|O)store  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
   978  	(MOV(Q|L|W|B|SS|SD|O)store  [off1+off2] {sym} ptr val mem)
   979  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
   980  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
   981  ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
   982  	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
   983  ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
   984  	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
   985  (CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
   986  	(CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
   987  (CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
   988  	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
   989  
   990  ((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
   991  	((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
   992  ((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
   993  	((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
   994  ((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
   995  	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
   996  ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
   997  	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
   998  ((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
   999  	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
  1000  ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1001  	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
  1002  
  1003  // Fold constants into stores.
  1004  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
  1005  	(MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
  1006  (MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1007  	(MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
  1008  (MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1009  	(MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
  1010  (MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1011  	(MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
  1012  
  1013  // Fold address offsets into constant stores.
  1014  (MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
  1015  	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
  1016  
  1017  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
  1018  // what variables are being read/written by the ops.
  1019  (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1020  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1021  	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1022  (MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1023  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1024  	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1025  (MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
  1026  	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1027  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1028  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1029  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1030  ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1031  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1032  	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1033  ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1034  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1035  	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1036  (CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1037  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1038  	(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1039  (CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1040  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1041  	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1042  
  1043  ((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1044  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1045  	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1046  ((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1047  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1048  	((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1049  ((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1050  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1051  	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1052  ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1053  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1054  	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1055  ((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1056  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1057  	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1058  ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1059  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1060  	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1061  
  1062  // fold LEAQs together
  1063  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1064        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  1065  
  1066  // LEAQ into LEAQ1
  1067  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1068         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1069  
  1070  // LEAQ1 into LEAQ
  1071  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1072         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1073  
  1074  // LEAQ into LEAQ[248]
  1075  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1076         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1077  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1078         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1079  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1080         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1081  
  1082  // LEAQ[248] into LEAQ
  1083  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1084        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1085  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1086        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1087  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1088        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1089  
  1090  // LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
  1091  (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1092        (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
  1093  (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1094        (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
  1095  (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
  1096        (LEAQ4 [off1+2*off2] {sym1} x y)
  1097  (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
  1098        (LEAQ8 [off1+4*off2] {sym1} x y)
  1099  // TODO: more?
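        // The arithmetic behind these merges: LEAQk computes x + k*y, so
        // (LEAQ1 [off1] x (LEAQ1 [off2] y y)) is off1+off2 + x + 2*y, i.e.
        // a LEAQ2 with the offsets summed, while
        // (LEAQ2 [off1] x (LEAQ1 [off2] y y)) is
        // off1 + x + 2*(off2+2*y) = (off1+2*off2) + x + 4*y, i.e. a LEAQ4
        // with the inner offset doubled, hence the 2* and 4* above.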
  1100  
  1101  // Lower LEAQ2/4/8 when the index is a constant
  1102  (LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
  1103  	(LEAQ [off+int32(scale)*2] {sym} x)
  1104  (LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
  1105  	(LEAQ [off+int32(scale)*4] {sym} x)
  1106  (LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
  1107  	(LEAQ [off+int32(scale)*8] {sym} x)
  1108  
  1109  // Absorb InvertFlags into branches.
  1110  (LT (InvertFlags cmp) yes no) => (GT cmp yes no)
  1111  (GT (InvertFlags cmp) yes no) => (LT cmp yes no)
  1112  (LE (InvertFlags cmp) yes no) => (GE cmp yes no)
  1113  (GE (InvertFlags cmp) yes no) => (LE cmp yes no)
  1114  (ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
  1115  (UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
  1116  (ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
  1117  (UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
  1118  (EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
  1119  (NE (InvertFlags cmp) yes no) => (NE cmp yes no)
  1120  
  1121  // Constant comparisons.
  1122  (CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
  1123  (CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
  1124  (CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
  1125  (CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
  1126  (CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
  1127  (CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
  1128  (CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
  1129  (CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
  1130  (CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
  1131  (CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
  1132  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
  1133  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
  1134  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
  1135  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
  1136  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
  1137  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
  1138  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
  1139  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
  1140  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
  1141  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
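        // The flag constants record both orderings at once. For example,
        // comparing x = -1 with y = 1 is "less" as a signed compare but
        // "above" as an unsigned one (0xFFFF...F > 1), hence FlagLT_UGT.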
  1142  
  1143  // CMPQconst requires a 32-bit const, but we can still constant-fold 64-bit consts.
  1144  // In theory this applies to any of the simplifications above,
  1145  // but CMPQ is the only one I've actually seen occur.
  1146  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
  1147  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
  1148  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
  1149  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
  1150  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
  1151  
  1152  // Other known comparisons.
  1153  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
  1154  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
  1155  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
  1156  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
  1157  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1158  (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1159  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1160  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
  1161  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m)  && int8(m)  < n => (FlagLT_ULT)
  1162  
  1163  // TESTQ c c sets flags like CMPQ c 0.
  1164  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
  1165  (TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
  1166  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0  => (FlagLT_UGT)
  1167  (TESTLconst [c] (MOVLconst [c])) && c < 0  => (FlagLT_UGT)
  1168  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0  => (FlagGT_UGT)
  1169  (TESTLconst [c] (MOVLconst [c])) && c > 0  => (FlagGT_UGT)
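        // TESTQ c c computes c&c = c, so ZF and SF reflect c compared
        // against zero while CF and OF are always cleared. A negative c is
        // unsigned-greater than zero, hence FlagLT_UGT; a positive c gives
        // FlagGT_UGT.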
  1170  
  1171  // TODO: DIVxU also.
  1172  
  1173  // Absorb flag constants into SBB ops.
  1174  (SBBQcarrymask (FlagEQ))     => (MOVQconst [0])
  1175  (SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
  1176  (SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
  1177  (SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
  1178  (SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
  1179  (SBBLcarrymask (FlagEQ))     => (MOVLconst [0])
  1180  (SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
  1181  (SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
  1182  (SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
  1183  (SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
  1184  
  1185  // Absorb flag constants into branches.
  1186  ((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no)     => (First yes no)
  1187  ((NE|LT|GT|ULT|UGT) (FlagEQ) yes no)     => (First no yes)
  1188  ((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
  1189  ((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
  1190  ((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
  1191  ((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
  1192  ((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
  1193  ((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
  1194  ((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
  1195  ((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)
  1196  
  1197  // Absorb flag constants into SETxx ops.
  1198  ((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ))     => (MOVLconst [1])
  1199  ((SETNE|SETL|SETG|SETB|SETA)     (FlagEQ))     => (MOVLconst [0])
  1200  ((SETNE|SETL|SETLE|SETB|SETBE)   (FlagLT_ULT)) => (MOVLconst [1])
  1201  ((SETEQ|SETG|SETGE|SETA|SETAE)   (FlagLT_ULT)) => (MOVLconst [0])
  1202  ((SETNE|SETL|SETLE|SETA|SETAE)   (FlagLT_UGT)) => (MOVLconst [1])
  1203  ((SETEQ|SETG|SETGE|SETB|SETBE)   (FlagLT_UGT)) => (MOVLconst [0])
  1204  ((SETNE|SETG|SETGE|SETB|SETBE)   (FlagGT_ULT)) => (MOVLconst [1])
  1205  ((SETEQ|SETL|SETLE|SETA|SETAE)   (FlagGT_ULT)) => (MOVLconst [0])
  1206  ((SETNE|SETG|SETGE|SETA|SETAE)   (FlagGT_UGT)) => (MOVLconst [1])
  1207  ((SETEQ|SETL|SETLE|SETB|SETBE)   (FlagGT_UGT)) => (MOVLconst [0])
  1208  
  1209  (SETEQstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1210  (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1211  (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1212  (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1213  (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1214  
  1215  (SETNEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1216  (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1217  (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1218  (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1219  (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1220  
  1221  (SETLstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1222  (SETLstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1223  (SETLstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1224  (SETLstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1225  (SETLstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1226  
  1227  (SETLEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1228  (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1229  (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1230  (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1231  (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1232  
  1233  (SETGstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1234  (SETGstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1235  (SETGstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1236  (SETGstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1237  (SETGstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1238  
  1239  (SETGEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1240  (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1241  (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1242  (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1243  (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1244  
  1245  (SETBstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1246  (SETBstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1247  (SETBstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1248  (SETBstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1249  (SETBstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1250  
  1251  (SETBEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1252  (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1253  (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1254  (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1255  (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1256  
  1257  (SETAstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1258  (SETAstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1259  (SETAstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1260  (SETAstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1261  (SETAstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1262  
  1263  (SETAEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1264  (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1265  (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1266  (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1267  (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1268  
  1269  // Remove redundant *const ops
  1270  (ADDQconst [0] x)          => x
  1271  (ADDLconst [c] x) && c==0  => x
  1272  (SUBQconst [0] x)          => x
  1273  (SUBLconst [c] x) && c==0  => x
  1274  (ANDQconst [0] _)          => (MOVQconst [0])
  1275  (ANDLconst [c] _) && c==0  => (MOVLconst [0])
  1276  (ANDQconst [-1] x)         => x
  1277  (ANDLconst [c] x) && c==-1 => x
  1278  (ORQconst [0] x)           => x
  1279  (ORLconst [c] x)  && c==0  => x
  1280  (ORQconst [-1] _)          => (MOVQconst [-1])
  1281  (ORLconst [c] _)  && c==-1 => (MOVLconst [-1])
  1282  (XORQconst [0] x)          => x
  1283  (XORLconst [c] x) && c==0  => x
  1284  // TODO: since we got rid of the W/B versions, we might miss
  1285  // things like (ANDLconst [0x100] x) which were formerly
  1286  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1287  // If we cared, we might do:
  1288  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1289  
  1290  // Remove redundant ops
  1291  // Not in generic rules, because they may appear after lowering, e.g. when lowering Slicemask
  1292  (NEG(Q|L) (NEG(Q|L) x)) => x
  1293  (NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
  1294  
  1295  // Convert constant subtracts to constant adds
  1296  (SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
  1297  (SUBLconst [c] x) => (ADDLconst [-c] x)
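        // The guard on the Q form exists because -(-1<<31) does not fit in
        // the int32 aux field. The L form needs no guard: negating -1<<31
        // wraps back to -1<<31, and adding that constant is the same as
        // subtracting it modulo 2^32.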
  1298  
  1299  // generic constant folding
  1300  // TODO: more of this
  1301  (ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
  1302  (ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
  1303  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
  1304  (ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
  1305  (SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
  1306  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
  1307  (SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
  1308  (SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
  1309  (SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
  1310  (SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
  1311  (NEGQ (MOVQconst [c])) => (MOVQconst [-c])
  1312  (NEGL (MOVLconst [c])) => (MOVLconst [-c])
  1313  (MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
  1314  (MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
  1315  (ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
  1316  (ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
  1317  (ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
  1318  (ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
  1319  (XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
  1320  (XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
  1321  (NOTQ (MOVQconst [c])) => (MOVQconst [^c])
  1322  (NOTL (MOVLconst [c])) => (MOVLconst [^c])
  1323  (BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
  1324  (BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
  1325  (BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
  1326  
  1327  // If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
  1328  // but we can still constant-fold.
  1329  // In theory this applies to any of the simplifications above,
  1330  // but ORQ is the only one I've actually seen occur.
  1331  (ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
  1332  
  1333  // generic simplifications
  1334  // TODO: more of this
  1335  (ADDQ x (NEGQ y)) => (SUBQ x y)
  1336  (ADDL x (NEGL y)) => (SUBL x y)
  1337  (SUBQ x x) => (MOVQconst [0])
  1338  (SUBL x x) => (MOVLconst [0])
  1339  (ANDQ x x) => x
  1340  (ANDL x x) => x
  1341  (ORQ x x)  => x
  1342  (ORL x x)  => x
  1343  (XORQ x x) => (MOVQconst [0])
  1344  (XORL x x) => (MOVLconst [0])
  1345  
  1346  (SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
  1347  (SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
  1348  (SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
  1349  
  1350  // Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
  1351  (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
  1352  (MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
  1353  
  1354  // checking AND against 0.
  1355  (CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
  1356  (CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
  1357  (CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
  1358  (CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
  1359  (CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
  1360  (CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
  1361  (CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
  1362  (CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
  1363  
  1364  // Convert TESTx to TESTxconst if possible.
  1365  (TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
  1366  (TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
  1367  (TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
  1368  (TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
  1369  
  1370  // TEST %reg,%reg is shorter than CMP
  1371  (CMPQconst x [0]) => (TESTQ x x)
  1372  (CMPLconst x [0]) => (TESTL x x)
  1373  (CMPWconst x [0]) => (TESTW x x)
  1374  (CMPBconst x [0]) => (TESTB x x)
  1375  (TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
  1376  (TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
  1377  (TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
  1378  (TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
  1379  
  1380  // Convert LEAQ1 back to ADDQ if we can
  1381  (LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
  1382  
  1383  (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
  1384    && x.Uses == 1
  1385    && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
  1386    && a.Val() == 0
  1387    && c.Val() == 0
  1388    && setPos(v, x.Pos)
  1389    && clobber(x)
  1390    => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
  1391  (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
  1392    && x.Uses == 1
  1393    && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
  1394    && a.Val() == 0
  1395    && c.Val() == 0
  1396    && setPos(v, x.Pos)
  1397    && clobber(x)
  1398    => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
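        // The two symmetric patterns above cover either ordering of a pair
        // of adjacent 8-byte zeroing stores, merging them into a single
        // 16-byte MOVOstoreconst.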
  1399  
  1400  // Merge load and op
  1401  // TODO: add indexed variants?
  1402  ((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
  1403  ((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
  1404  ((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
  1405  ((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
  1406  (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
  1407  (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
  1408  	((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
  1409  (MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
  1410  (MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
  1411  	((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
  1412  (MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) =>
  1413  	(BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1414  
  1415  // Merge ADDQconst and LEAQ into atomic loads.
  1416  (MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1417  	(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
  1418  (MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1419  	(MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
  1420  
  1421  // Merge ADDQconst and LEAQ into atomic stores.
  1422  (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1423  	(XCHGQ [off1+off2] {sym} val ptr mem)
  1424  (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
  1425  	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  1426  (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1427  	(XCHGL [off1+off2] {sym} val ptr mem)
  1428  (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
  1429  	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  1430  
  1431  // Merge ADDQconst into atomic adds.
  1432  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  1433  (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1434  	(XADDQlock [off1+off2] {sym} val ptr mem)
  1435  (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1436  	(XADDLlock [off1+off2] {sym} val ptr mem)
  1437  
  1438  // Merge ADDQconst into atomic compare and swaps.
  1439  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  1440  (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
  1441  	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  1442  (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
  1443  	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
  1444  
  1445  // We don't need the conditional move if we know the arg of BSFQ/BSRQ is not zero.
  1446  (CMOVQEQ x _ (Select1 (BS(F|R)Q (ORQconst [c] _)))) && c != 0 => x
  1447  // Extension is unnecessary for trailing zeros.
  1448  (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
  1449  (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
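        // The OR-ed constant plants a sentinel bit just above the narrow
        // value, so BSFQ never scans past bit 8 (or 16). Bits above the
        // sentinel can never affect the result, which is what makes the
        // zero extension redundant.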
  1450  
  1451  // Redundant sign/zero extensions
  1452  // Note: see issue 21963. We have to make sure we use the right type on
  1453  // the resulting extension (the outer type, not the inner type).
  1454  (MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
  1455  (MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
  1456  (MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
  1457  (MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
  1458  (MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
  1459  (MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
  1460  (MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
  1461  (MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
  1462  (MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
  1463  (MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
  1464  (MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
  1465  (MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
  1466  
  1467  (MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
  1468  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
  1469  	((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1470  (MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  1471  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
  1472  	((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1473  
  1474  // float <-> int register moves, with no conversion.
  1475  // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
  1476  (MOVQload  [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
  1477  (MOVLload  [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
  1478  (MOVSDload [off] {sym} ptr (MOVQstore  [off] {sym} ptr val _)) => (MOVQi2f val)
  1479  (MOVSSload [off] {sym} ptr (MOVLstore  [off] {sym} ptr val _)) => (MOVLi2f val)
  1480  
  1481  // Other load-like ops.
  1482  (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
  1483  (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
  1484  (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
  1485  (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
  1486  (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
  1487  (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
  1488  ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
  1489  ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
  1490  (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
  1491  (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
  1492  
  1493  (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
  1494  (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
  1495  (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
  1496  (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
  1497  (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
  1498  (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
  1499  
  1500  // Detect FMA
  1501  (ADDS(S|D) (MULS(S|D) x y) z) && buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v) => (VFMADD231S(S|D) z x y)
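        // A minimal Go-level sketch of code that can reach this rule
        // (assuming a GOAMD64=v3 target and that useFMA permits fusing at
        // this site):
        //
        //    func axpy(x, y, z float64) float64 { return x*y + z }
        //
        // Fusing changes results in general, since FMA skips the
        // intermediate rounding of the multiply; that is why useFMA gets
        // a say.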
  1502  
  1503  // Redirect stores to use the other register set.
  1504  (MOVQstore  [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
  1505  (MOVLstore  [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
  1506  (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore  [off] {sym} ptr val mem)
  1507  (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore  [off] {sym} ptr val mem)
  1508  
  1509  (MOVSDstore [off] {sym} ptr (MOVSDconst [f]) mem) && f == f => (MOVQstore [off] {sym} ptr (MOVQconst [int64(math.Float64bits(f))]) mem)
  1510  (MOVSSstore [off] {sym} ptr (MOVSSconst [f]) mem) && f == f => (MOVLstore [off] {sym} ptr (MOVLconst [int32(math.Float32bits(f))]) mem)
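        // f == f is false exactly when f is a NaN, so these rewrites skip
        // NaNs rather than committing to one particular NaN bit pattern
        // for the integer store.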
  1511  
  1512  // Load args directly into the register class where they will be used.
  1513  // We do this by just modifying the type of the Arg.
  1514  (MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1515  (MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1516  (MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1517  (MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1518  
  1519  // LEAQ is rematerializeable, so this helps to avoid register spill.
  1520  // See issue 22947 for details
  1521  (ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
  1522  
  1523  // HMULx is commutative, but its first argument must go in AX.
  1524  // If possible, put a rematerializeable value in the first argument slot,
  1525  // to reduce the odds that another value will have to be spilled
  1526  // specifically to free up AX.
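        // Swapping is safe because HMUL is commutative, and a
        // rematerializeable y (e.g. a constant) can simply be regenerated
        // in AX at its use instead of occupying a register until then.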
  1527  (HMUL(Q|L)  x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)  y x)
  1528  (HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
  1529  
  1530  // Fold loads into compares
  1531  // Note: these may be undone by the flagalloc pass.
  1532  (CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
  1533  (CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
  1534  
  1535  (CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
  1536  	&& l.Uses == 1
  1537  	&& clobber(l) =>
  1538  @l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
  1539  (CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
  1540  	&& l.Uses == 1
  1541  	&& clobber(l) =>
  1542  @l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1543  
  1544  (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1545  (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
  1546  (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
  1547  (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
  1548  
  1549  (TEST(Q|L|W|B)  l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
  1550          && l == l2
  1551  	&& l.Uses == 2
  1552  	&& clobber(l) =>
  1553    @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)
  1554  
  1555  // Convert ANDload to MOVload when we can do the AND in a containing TEST op.
  1556  // Only do this when it's within the same block, so we don't have flags live across basic block boundaries.
  1557  // See issue 44228.
  1558  (TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x)
  1559  
  1560  (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
  1561  (MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  1562  (MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  1563  (MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  1564  (MOVBQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int8(read8(sym, int64(off))))])
  1565  (MOVWQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
  1566  (MOVLQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
  1567  
  1568  
  1569  (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
  1570    (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
  1571      (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
  1572  
  1573  // Arch-specific inlining for small or disjoint runtime.memmove
  1574  // Match post-lowering calls, memory version.
  1575  (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
  1576  	&& sc.Val64() >= 0
  1577  	&& isSameCall(sym, "runtime.memmove")
  1578  	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1579  	&& isInlinableMemmove(dst, src, sc.Val64(), config)
  1580  	&& clobber(s1, s2, s3, call)
  1581  	=> (Move [sc.Val64()] dst src mem)
  1582  
  1583  // Match post-lowering calls, register version.
  1584  (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
  1585  	&& sz >= 0
  1586  	&& isSameCall(sym, "runtime.memmove")
  1587  	&& call.Uses == 1
  1588  	&& isInlinableMemmove(dst, src, sz, config)
  1589  	&& clobber(call)
  1590  	=> (Move [sz] dst src mem)
  1591  
  1592  // Prefetch instructions
  1593  (PrefetchCache ...)   => (PrefetchT0 ...)
  1594  (PrefetchCacheStreamed ...) => (PrefetchNTA ...)
  1595  
  1596  // CPUID feature: BMI1.
  1597  (AND(Q|L) x (NOT(Q|L) y))               && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y)
  1598  (AND(Q|L) x (NEG(Q|L) x))               && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x)
  1599  (XOR(Q|L) x (ADD(Q|L)const [-1] x))     && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x)
  1600  (AND(Q|L) <t> x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (Select0 <t> (BLSR(Q|L) x))
  1601  // eliminate TEST instruction in classical "isPowerOfTwo" check
  1602  (SETEQ       (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (SETEQ       (Select1 <types.TypeFlags> blsr))
  1603  (CMOVQEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
  1604  (CMOVLEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
  1605  (EQ          (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (EQ          (Select1 <types.TypeFlags> blsr) yes no)
  1606  (SETNE       (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (SETNE       (Select1 <types.TypeFlags> blsr))
  1607  (CMOVQNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
  1608  (CMOVLNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
  1609  (NE          (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (NE          (Select1 <types.TypeFlags> blsr) yes no)
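        // A sketch of the kind of source these match (one plausible shape):
        //
        //    func isPowerOfTwo(x uint64) bool { return x&(x-1) == 0 }
        //
        // x&(x-1) lowers to Select0 of BLSRQ under GOAMD64>=3, and BLSRQ
        // already sets ZF from its result, so the separate TEST can be
        // replaced by reading Select1 of the same BLSRQ.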
  1610  
  1611  (BSWAP(Q|L) (BSWAP(Q|L) p)) => p
  1612  
  1613  // CPUID feature: MOVBE.
  1614  (MOV(Q|L)store   [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
  1615  (MOVBE(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1                          => (MOV(Q|L)store   [i] {s} p w mem)
  1616  (BSWAP(Q|L) x:(MOV(Q|L)load   [i] {s} p mem))  && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => @x.Block (MOVBE(Q|L)load [i] {s} p mem)
  1617  (BSWAP(Q|L) x:(MOVBE(Q|L)load [i] {s} p mem))  && x.Uses == 1                          => @x.Block (MOV(Q|L)load   [i] {s} p mem)
  1618  (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)   && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBEWstore [i] {s} p w mem)
  1619  (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 => (MOVWstore [i] {s} p w mem)
  1620  
  1621  (SAR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
  1622  (SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
  1623  (SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
  1624  
  1625  ((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
  1626  ((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
  1627  ((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
  1628  
  1629  // Convert atomic logical operations to easier ones if we don't use the result.
  1630  (Select1 a:(LoweredAtomic(And64|And32|Or64|Or32) ptr val mem)) && a.Uses == 1 && clobber(a) => ((ANDQ|ANDL|ORQ|ORL)lock ptr val mem)
  1631  
  1632  // If we are checking the results of an add, use the flags directly from the add.
  1633  // Note that this only works for EQ/NE. ADD sets the CF/OF flags differently
  1634  // than TEST sets them.
  1635  // Note also that a.Args[0] here refers to the post-flagify'd value.
  1636  ((EQ|NE) t:(TESTQ a:(ADDQconst [c] x) a)) && t.Uses == 1 && flagify(a) => ((EQ|NE) (Select1 <types.TypeFlags> a.Args[0]))
  1637  ((EQ|NE) t:(TESTL a:(ADDLconst [c] x) a)) && t.Uses == 1 && flagify(a) => ((EQ|NE) (Select1 <types.TypeFlags> a.Args[0]))
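        // I.e. for code like "if x+3 == 0", the TESTQ of the sum is dropped
        // and the branch reads ZF straight from the flag-setting form of
        // the add. ZF agrees between the two; CF/OF do not, which is why
        // this is limited to EQ/NE.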
  1638  
  1639  // If we don't use the flags any more, just use the standard op.
  1640  (Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x)