src/cmd/compile/internal/ssa/_gen/AMD64.rules

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
(AddPtr ...) => (ADDQ ...)
(Add(32|64)F ...) => (ADDS(S|D) ...)

(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
(SubPtr ...) => (SUBQ ...)
(Sub(32|64)F ...) => (SUBS(S|D) ...)

(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
(Mul(32|64)F ...) => (MULS(S|D) ...)

(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))

(Hmul(64|32) ...) => (HMUL(Q|L) ...)
(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)

(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
(Div8  x y) => (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Div(32|64)F ...) => (DIVS(S|D) ...)

(Select0 (Add64carry x y c)) =>
	(Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Add64carry x y c)) =>
	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
(Select0 (Sub64borrow x y c)) =>
	(Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Sub64borrow x y c)) =>
	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
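
// For reference, Add64carry and Sub64borrow are the generic ops behind
// math/bits.Add64 and math/bits.Sub64; NEGLflags materializes the 0/1 carry
// input into the CPU carry flag. A Go-level sketch of the semantics being
// lowered here (this is the math/bits fallback formulation):
//
//	sum := x + y + c                               // c is 0 or 1
//	carryOut := ((x & y) | ((x | y) &^ sum)) >> 63 // 1 iff the add wrapped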

// Optimize ADCQ and friends
(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x


(Mul64uhilo ...) => (MULQU2 ...)
(Div128u ...) => (DIVQU2 ...)

(Avg64u ...) => (AVGQU ...)

(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
(Mod8  x y) => (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)

(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
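
// Floating-point negation is a sign-bit flip: math.Copysign(0, -1) is -0.0,
// whose only set bit is the sign bit, so the PXOR toggles the sign and leaves
// everything else alone. An equivalent Go-level sketch for the 64-bit case:
//
//	neg := math.Float64frombits(math.Float64bits(x) ^ (1 << 63)) // == -x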

// Lowering boolean ops
(AndB ...) => (ANDL ...)
(OrB ...) => (ORL ...)
(Not x) => (XORLconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)

// Lowering other arithmetic
(Ctz64 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64 <t> x) && buildcfg.GOAMD64 <  3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x)     && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [1<<16] x))
(Ctz8  x) => (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))

(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz8NonZero  x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64NonZero x) && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ x))
(Ctz32NonZero x) && buildcfg.GOAMD64 <  3 => (BSFL x)
(Ctz16NonZero x) && buildcfg.GOAMD64 <  3 => (BSFL x)
(Ctz8NonZero  x) && buildcfg.GOAMD64 <  3 => (BSFL x)

// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
// However, for zero-extended values, we can cheat a bit, and calculate
// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
// places the index of the highest set bit where we want it.
// For GOAMD64>=3, BitLen can be calculated by OperandSize - LZCNT(x).
(BitLen64 <t> x) && buildcfg.GOAMD64 < 3 => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
(BitLen32 x) && buildcfg.GOAMD64 <  3 => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
(BitLen16 x) && buildcfg.GOAMD64 <  3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
(BitLen8  x) && buildcfg.GOAMD64 <  3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
(BitLen64 <t> x)        && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
// Use the 64-bit version so that constant folding can remove unnecessary arithmetic.
(BitLen32 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
(BitLen16 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
(BitLen8 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
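
// Worked example of the BSR(2x+1) trick above: for x = 0x00F0, BitLen is 8
// (the highest set bit is bit 7). 2x+1 = 0x01E1 has its highest set bit at
// bit 8, so BSR(2x+1) = 8 = BitLen(x); and BSR(2*0+1) = BSR(1) = 0 = BitLen(0).
// The LEAQ1/LEAL1 [1] forms compute exactly x+x+1 in a single instruction.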

(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
(Bswap16 x) => (ROLWconst [8] x)

(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))

(Sqrt ...) => (SQRTSD ...)
(Sqrt32 ...) => (SQRTSS ...)

(RoundToEven x) => (ROUNDSD [0] x)
(Floor x)       => (ROUNDSD [1] x)
(Ceil x)        => (ROUNDSD [2] x)
(Trunc x)       => (ROUNDSD [3] x)

(FMA x y z) => (VFMADD231SD z x y)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  ...) => (MOVBQSX ...)
(SignExt8to32  ...) => (MOVBQSX ...)
(SignExt8to64  ...) => (MOVBQSX ...)
(SignExt16to32 ...) => (MOVWQSX ...)
(SignExt16to64 ...) => (MOVWQSX ...)
(SignExt32to64 ...) => (MOVLQSX ...)

(ZeroExt8to16  ...) => (MOVBQZX ...)
(ZeroExt8to32  ...) => (MOVBQZX ...)
(ZeroExt8to64  ...) => (MOVBQZX ...)
(ZeroExt16to32 ...) => (MOVWQZX ...)
(ZeroExt16to64 ...) => (MOVWQZX ...)
(ZeroExt32to64 ...) => (MOVLQZX ...)

(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
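
// Slicemask returns 0 when x == 0 and all ones when x > 0 (its inputs are
// slice lengths/capacities, so x is never negative). NEGQ sets the sign bit
// for any x > 0, and the arithmetic shift then smears it across the word;
// a Go-level sketch:
//
//	mask := int64(-x) >> 63 // 0 for x == 0, -1 (all ones) for x > 0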

(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8  ...) => (Copy ...)
(Trunc32to8  ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8  ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Lowering float <-> int
(Cvt32to32F ...) => (CVTSL2SS ...)
(Cvt32to64F ...) => (CVTSL2SD ...)
(Cvt64to32F ...) => (CVTSQ2SS ...)
(Cvt64to64F ...) => (CVTSQ2SD ...)

(Cvt32Fto32 ...) => (CVTTSS2SL ...)
(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
(Cvt64Fto32 ...) => (CVTTSD2SL ...)
(Cvt64Fto64 ...) => (CVTTSD2SQ ...)

(Cvt32Fto64F ...) => (CVTSS2SD ...)
(Cvt64Fto32F ...) => (CVTSD2SS ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// Floating-point min is tricky, as the hardware op isn't right for various special
// cases (-0 and NaN). We use two hardware ops organized just right to make the
// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207
// (although that comment isn't exactly right, as the value overwritten is not simulated correctly).
//    t1 = MINSD x, y   => incorrect if x==NaN or x==-0,y==+0
//    t2 = MINSD t1, x  => fixes x==NaN case
//   res = POR t1, t2   => fixes x==-0,y==+0 case
// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although
// it might not produce the same NaN as the input).
(Min(64|32)F <t> x y) => (POR (MINS(D|S) <t> (MINS(D|S) <t> x y) x) (MINS(D|S) <t> x y))
// Floating-point max is even trickier. Punt to using min instead.
// max(x,y) == -min(-x,-y)
(Max(64|32)F <t> x y) => (Neg(64|32)F <t> (Min(64|32)F <t> (Neg(64|32)F <t> x) (Neg(64|32)F <t> y)))
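
// Worked example of the POR trick above: MINSD returns its second operand when
// the operands compare equal, so for x = -0.0, y = +0.0 (which compare equal):
//
//	t1 = MINSD x y  = +0.0   // the wrong answer for Go's min
//	t2 = MINSD t1 x = -0.0   // the second operand is returned again
//	POR t2 t1       = -0.0   // ORing the sign bits rescues the -0 case
//
// If x is NaN, t2 = MINSD t1 x returns the NaN, and (NaN OR anything) stays a NaN.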

(CvtBoolToUint8 ...) => (Copy ...)

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Lsh8x(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))

(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
(Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SHLL x y)
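
// Example of why the masking above is needed: Go defines x << s == 0 for any
// s >= the operand width, but SHLQ uses only the low 6 bits of the count, so
// a hardware shift by 64 would leave x unchanged. The SBBQcarrymask term is
// all ones when y < 64 and all zeros otherwise, giving, in Go terms:
//
//	result := (x << (y & 63)) & mask // mask = ^uint64(0) if y < 64, else 0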

(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
(Rsh8Ux(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))

(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SHRB x y)

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
(Rsh8x(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))

(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
(Rsh8x(64|32|16|8) x y)  && shiftIsBounded(v) => (SARB x y)

// Lowering integer comparisons
(Less(64|32|16|8)      x y) => (SETL  (CMP(Q|L|W|B)     x y))
(Less(64|32|16|8)U     x y) => (SETB  (CMP(Q|L|W|B)     x y))
(Leq(64|32|16|8)       x y) => (SETLE (CMP(Q|L|W|B)     x y))
(Leq(64|32|16|8)U      x y) => (SETBE (CMP(Q|L|W|B)     x y))
(Eq(Ptr|64|32|16|8|B)  x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))

// Lowering floating point comparisons
// Note Go assembler gets UCOMISx operand order wrong, but it is right here
// and the operands are reversed when generating assembly language.
(Eq(32|64)F   x y) => (SETEQF (UCOMIS(S|D) x y))
(Neq(32|64)F  x y) => (SETNEF (UCOMIS(S|D) x y))
// Use SETGF/SETGEF with reversed operands to dodge NaN case.
(Less(32|64)F x y) => (SETGF  (UCOMIS(S|D) y x))
(Leq(32|64)F  x y) => (SETGEF (UCOMIS(S|D) y x))

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)

// Lowering stores
(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (MOVSDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (MOVSSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVQstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)

// Lowering moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) => (MOVOstore dst (MOVOload src mem) mem)

(Move [32] dst src mem) =>
	(Move [16]
		(OffPtr <dst.Type> dst [16])
		(OffPtr <src.Type> src [16])
		(Move [16] dst src mem))

(Move [48] dst src mem) =>
	(Move [32]
		(OffPtr <dst.Type> dst [16])
		(OffPtr <src.Type> src [16])
		(Move [16] dst src mem))

(Move [64] dst src mem) =>
	(Move [32]
		(OffPtr <dst.Type> dst [32])
		(OffPtr <src.Type> src [32])
		(Move [32] dst src mem))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [6] dst src mem) =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [7] dst src mem) =>
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [10] dst src mem) =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [11] dst src mem) =>
	(MOVLstore [7] dst (MOVLload [7] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [12] dst src mem) =>
	(MOVLstore [8] dst (MOVLload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem) && s >= 13 && s <= 15 =>
	(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
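
// Note that the odd sizes above use a pair of possibly overlapping copies:
// e.g. Move [7] copies bytes [0,4) and then [3,7), and the 13..15 case copies
// bytes [0,8) and then [s-8,s). The overlapping bytes are written twice with
// identical data, which is cheaper than a separate 1/2/4-byte tail.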

// Adjust moves to be a multiple of 16 bytes.
(Move [s] dst src mem)
	&& s > 16 && s%16 != 0 && s%16 <= 8 =>
	(Move [s-s%16]
		(OffPtr <dst.Type> dst [s%16])
		(OffPtr <src.Type> src [s%16])
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem)
	&& s > 16 && s%16 != 0 && s%16 > 8 =>
	(Move [s-s%16]
		(OffPtr <dst.Type> dst [s%16])
		(OffPtr <src.Type> src [s%16])
		(MOVOstore dst (MOVOload src mem) mem))
// Medium copying uses Duff's device.
(Move [s] dst src mem)
	&& s > 64 && s <= 16*64 && s%16 == 0
	&& logLargeCopy(v, s) =>
	(DUFFCOPY [s] dst src mem)

// Large copying uses REP MOVSQ.
(Move [s] dst src mem) && s > 16*64 && s%8 == 0 && logLargeCopy(v, s) =>
	(REPMOVSQ dst src (MOVQconst [s/8]) mem)

// Lowering Zero instructions
(Zero [0] _ mem) => mem
(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)

(Zero [3] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [5] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [6] destptr mem) =>
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [7] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))

// Zero small numbers of words directly.
(Zero [9] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [10] destptr mem) =>
	(MOVWstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [11] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,7)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [12] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [s] destptr mem) && s > 12 && s < 16 =>
	(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

// Adjust zeros to be a multiple of 16 bytes.
(Zero [s] destptr mem) && s%16 != 0 && s > 16 =>
	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [16] destptr mem) =>
	(MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [32] destptr mem) =>
	(MOVOstoreconst [makeValAndOff(0,16)] destptr
		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [48] destptr mem) =>
	(MOVOstoreconst [makeValAndOff(0,32)] destptr
		(MOVOstoreconst [makeValAndOff(0,16)] destptr
			(MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
(Zero [64] destptr mem) =>
	(MOVOstoreconst [makeValAndOff(0,48)] destptr
		(MOVOstoreconst [makeValAndOff(0,32)] destptr
			(MOVOstoreconst [makeValAndOff(0,16)] destptr
				(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))

// Medium zeroing uses Duff's device.
(Zero [s] destptr mem)
	&& s > 64 && s <= 1024 && s%16 == 0 =>
	(DUFFZERO [s] destptr mem)

// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem)
	&& s > 1024 && s%8 == 0 =>
	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)

// Lowering constants
(Const8   [c]) => (MOVLconst [int32(c)])
(Const16  [c]) => (MOVLconst [int32(c)])
(Const32  ...) => (MOVLconst ...)
(Const64  ...) => (MOVQconst ...)
(Const32F ...) => (MOVSSconst ...)
(Const64F ...) => (MOVSDconst ...)
(ConstNil    ) => (MOVQconst [0])
(ConstBool [c]) => (MOVLconst [b2i32(c)])

// Lowering calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Lowering conditional moves
// If the condition is a SETxx, we can just run a CMOV from the comparison that was
// setting the flags.
// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
    => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
    => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
    => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)

// If the condition does not set the flags, we need to generate a comparison.
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
    => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
    => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
    => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))

(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
    => (CMOVQNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
    => (CMOVLNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
    => (CMOVWNE y x (CMPQconst [0] check))

// Absorb InvertFlags
(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
    => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
    => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
    => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)

// Absorb constants generated during lower
(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y

// Miscellaneous
(IsNonNil p) => (SETNE (TESTQ p p))
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // Only lower in the old ABI; the new ABI has a dedicated G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)

(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
(Addr {sym} base) => (LEAQ {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (LEAQ {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (LEAQ {sym} base)

(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)

// block rewrites
(If (SETL  cmp) yes no) => (LT  cmp yes no)
(If (SETLE cmp) yes no) => (LE  cmp yes no)
(If (SETG  cmp) yes no) => (GT  cmp yes no)
(If (SETGE cmp) yes no) => (GE  cmp yes no)
(If (SETEQ cmp) yes no) => (EQ  cmp yes no)
(If (SETNE cmp) yes no) => (NE  cmp yes no)
(If (SETB  cmp) yes no) => (ULT cmp yes no)
(If (SETBE cmp) yes no) => (ULE cmp yes no)
(If (SETA  cmp) yes no) => (UGT cmp yes no)
(If (SETAE cmp) yes no) => (UGE cmp yes no)
(If (SETO cmp) yes no) => (OS cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF  cmp) yes no) => (UGT  cmp yes no)
(If (SETGEF cmp) yes no) => (UGE  cmp yes no)
(If (SETEQF cmp) yes no) => (EQF  cmp yes no)
(If (SETNEF cmp) yes no) => (NEF  cmp yes no)

(If cond yes no) => (NE (TESTB cond cond) yes no)

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// Atomic loads.  Other than preserving their ordering with respect to other loads, nothing special here.
(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)

// Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))

// Atomic exchanges.
(AtomicExchange8 ptr val mem) => (XCHGB val ptr mem)
(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
(Select1     (AddTupleFirst32   _ tuple)) => (Select1 tuple)
(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
(Select1     (AddTupleFirst64   _ tuple)) => (Select1 tuple)
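
// XADD returns the *old* memory value, but Go's atomic Add must return the
// *new* value, so the AddTupleFirst marker re-adds val when the result is
// actually used; as a sketch:
//
//	old := xadd(ptr, val) // what XADDQlock produces
//	new := old + val      // what Select0 of AtomicAdd64 must produce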

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)

// Atomic memory logical operations (old style).
(AtomicAnd8  ptr val mem) => (ANDBlock ptr val mem)
(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
(AtomicOr8   ptr val mem) => (ORBlock  ptr val mem)
(AtomicOr32  ptr val mem) => (ORLlock  ptr val mem)

// Atomic memory logical operations (new style).
(Atomic(And64|And32|Or64|Or32)value ptr val mem) => (LoweredAtomic(And64|And32|Or64|Or32) ptr val mem)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// lowering rotates
(RotateLeft8  ...) => (ROLB ...)
(RotateLeft16 ...) => (ROLW ...)
(RotateLeft32 ...) => (ROLL ...)
(RotateLeft64 ...) => (ROLQ ...)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold boolean tests into blocks
(NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) => (LT  cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE  cmp yes no)
(NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) => (GT  cmp yes no)
(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE  cmp yes no)
(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ  cmp yes no)
(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE  cmp yes no)
(NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) => (ULT cmp yes no)
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
(NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) => (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)

// Unsigned comparisons to 0/1
(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])

// x & 1 != 0 -> x & 1
(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
// x & 1 == 0 -> (x & 1) ^ 1
(SETAE (BT(L|Q)const [0] x)) => (XORLconst [1] (ANDLconst <typ.Bool> [1] x))

// Shorten compare by rewriting x < 128 as x <= 127, which can be encoded in a single-byte immediate on x86.
(SETL c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETLE (CMP(Q|L)const [127] x))
(SETB c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETBE (CMP(Q|L)const [127] x))

// x >= 128 -> x > 127
(SETGE c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETG (CMP(Q|L)const [127] x))
(SETAE c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETA (CMP(Q|L)const [127] x))

(CMOVQLT x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVQLE x y (CMP(Q|L)const [127] z))
(CMOVLLT x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVLLE x y (CMP(Q|L)const [127] z))
(LT          c:(CMP(Q|L)const [128] z) yes no) && c.Uses == 1 => (LE (CMP(Q|L)const [127] z) yes no)
(CMOVQGE x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVQGT x y (CMP(Q|L)const [127] z))
(CMOVLGE x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVLGT x y (CMP(Q|L)const [127] z))
(GE          c:(CMP(Q|L)const [128] z) yes no) && c.Uses == 1 => (GT (CMP(Q|L)const [127] z)  yes no)
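
// The payoff of the 128 -> 127 rewrites above: x86 immediates in [-128,127]
// get a one-byte sign-extended encoding, while 128 needs a full four-byte
// immediate, so the flipped comparison saves three bytes of code.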

// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
// Note that BTx instructions use the carry bit, so we need to convert tests for zero flag
// into tests for carry flags.
// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
// mutandis, for UGE and SETAE, and CC and SETCC.
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
    => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
    => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
    => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE)  (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE)  (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
    => (SET(B|AE)  (BTLconst [int8(log32(c))] x))
(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
    => (SET(B|AE)  (BTQconst [int8(log32(c))] x))
(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
    => (SET(B|AE)  (BTQconst [int8(log64(c))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
    => (SET(B|AE)store  [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
    => (SET(B|AE)store  [off] {sym} ptr (BTQ x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
    => (SET(B|AE)store  [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
    => (SET(B|AE)store  [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
    => (SET(B|AE)store  [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
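
// Example of the patterns these rules target: Go source like
//
//	if a&(1<<b) != 0 { ... }
//
// reaches here as (NE (TESTQ (SHLQ (MOVQconst [1]) b) a)); rewriting it to
// (ULT (BTQ b a)) tests the bit directly and branches on the carry flag.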

// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
// and further combining shifts.
(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
(BT(Q|L)const [c] (ADDQ x x)) && c>1  => (BT(Q|L)const [c-1] x)
(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d      => (BT(Q|L)const [c-d] x)
(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
(BTLconst [c] (ADDL x x)) && c>1 => (BTLconst [c-1] x)
(BTLconst [c] (SHLLconst [d] x)) && c>d      => (BTLconst [c-d] x)
(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x)

// Rewrite a & 1 != 1 into a & 1 == 0.
// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)

// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
// the constant field of the OR/XOR instruction. See issue 61694.
((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x)
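
// Example for the note above: x |= 1<<33 cannot use ORQconst, whose constant
// field is a sign-extended 32-bit immediate, but (BTSQconst [33] x) sets the
// bit in one instruction. Bits below 31 fit the immediate and stay as OR.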

// Recognize bit clearing: a &^= 1<<b
(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
// Note: only convert AND to BTR if the constant wouldn't fit in
// the constant field of the AND instruction. See issue 61694.
(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x)

// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
// for instance:
//    x & 0xFFFF0000 -> (x >> 16) << 16
//    x & 0x80000000 -> (x >> 31) << 31
//
// In case the mask is just one bit (like second example above), it conflicts
// with the above rules to detect bit-testing / bit-clearing of first/last bit.
// We thus special-case them, by detecting the shift patterns.

// Special case resetting first/last bit
(ADD(L|Q) (SHR(L|Q)const [1] x) (SHR(L|Q)const [1] x))
	=> (AND(L|Q)const [-2] x)
(SHRLconst [1] (ADDL x x))
	=> (ANDLconst [0x7fffffff] x)
(SHRQconst [1] (ADDQ x x))
	=> (BTRQconst [63] x)
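
// Worked examples: generic.rules turns x &^ 1 into (x>>1)<<1, which the
// shift-by-one preference below re-expresses as (x>>1)+(x>>1), and similarly
// for the top-bit mask, so the rules above recover the cheap AND/BTR forms:
//
//	(x>>1) + (x>>1) == x &^ 1          // hence AND(L|Q)const [-2]
//	uint64(x+x) >> 1 == x &^ (1 << 63) // hence BTRQconst [63]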

// Special case testing first/last bit (with double-shift generated by generic.rules)
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)

((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE)  (BTQconst [0] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE)  (BTLconst [0] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)

// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
    => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
    => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)

// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x)
(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x)
(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x)
(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x)

// Fold boolean negation into SETcc.
(XORLconst [1] (SETNE x)) => (SETEQ x)
(XORLconst [1] (SETEQ x)) => (SETNE x)
(XORLconst [1] (SETL  x)) => (SETGE x)
(XORLconst [1] (SETGE x)) => (SETL  x)
(XORLconst [1] (SETLE x)) => (SETG  x)
(XORLconst [1] (SETG  x)) => (SETLE x)
(XORLconst [1] (SETB  x)) => (SETAE x)
(XORLconst [1] (SETAE x)) => (SETB  x)
(XORLconst [1] (SETBE x)) => (SETA  x)
(XORLconst [1] (SETA  x)) => (SETBE x)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) => (UGT  cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE  cmp yes no)
(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF  cmp yes no)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF  cmp yes no)

// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))

// fold constants into instructions
(ADDQ x (MOVQconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x)
(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)

(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))

(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
(MULL x (MOVLconst [c])) => (MULLconst [c] x)

(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)

(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
(OR(L|Q)const  [c] (OR(L|Q)const  [d] x)) => (OR(L|Q)const  [c | d] x)

(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)

(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
(ORQ x (MOVLconst [c])) => (ORQconst [c] x)
(ORL x (MOVLconst [c])) => (ORLconst [c] x)

(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
(XORL x (MOVLconst [c])) => (XORLconst [c] x)

(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)

(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])

(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)

// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))

((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0  => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0  => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))

((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))

((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0  => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0  => ((SHLL|SHRL|SARL) x (NEGL <t> y))
((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))

// rotate left negative = rotate right
(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
(ROLL x (NEG(Q|L) y)) => (RORL x y)
(ROLW x (NEG(Q|L) y)) => (RORW x y)
(ROLB x (NEG(Q|L) y)) => (RORB x y)

// rotate right negative = rotate left
(RORQ x (NEG(Q|L) y)) => (ROLQ x y)
(RORL x (NEG(Q|L) y)) => (ROLL x y)
(RORW x (NEG(Q|L) y)) => (ROLW x y)
(RORB x (NEG(Q|L) y)) => (ROLB x y)

// rotate by constants
(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)

(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)

// Constant shift simplifications
((SHLQ|SHRQ|SARQ)const      x [0]) => x
((SHLL|SHRL|SARL)const      x [0]) => x
((SHRW|SARW)const           x [0]) => x
((SHRB|SARB)const           x [0]) => x
((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x

// Multi-register shifts
(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
(ORQ (SH(R|L)XQ lo bits) (SH(L|R)XQ hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHRW x (MOVLconst [24]))), but just in case.

(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))

// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))

// Using MOVZX instead of AND is cheaper.
(AND(Q|L)const [  0xFF] x) => (MOVBQZX x)
(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
// It is commented out for now; it also can't trigger, because the is32Bit guard on the
// ANDQconst lowering rule above prevents 0xFFFFFFFF from matching in the first place.
// Using an alternate form of this rule segfaults some binaries because of
// adverse interactions with other passes.
// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)

// strength reduction
// Assumes the following instruction costs (from https://gmplib.org/~tege/x86-timing.pdf):
//    1 - addq, shlq, leaq, negq, subq
//    3 - imulq
// This limits the rewrites to two instructions.
// Note that negq always operates in-place,
// which can require a register-register move
// to preserve the original value,
// so it must be used with care.
(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x))
(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x))
(MUL(Q|L)const [-1] x) => (NEG(Q|L) x)
(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
(MUL(Q|L)const [ 1] x) => x
(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x)
(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x)
(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 <v.Type> x x))
(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x)
(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 <v.Type> x x))
(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 <v.Type> x x))
(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 <v.Type> x x))
(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 <v.Type> x x))
(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 <v.Type> x x) (LEA(Q|L)2 <v.Type> x x))
(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 <v.Type> x x))
(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 <v.Type> x x) (LEA(Q|L)4 <v.Type> x x))
(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))

(MUL(Q|L)const [c] x) && isPowerOfTwo(int64(c)+1) && c >=  15 => (SUB(Q|L)  (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo(c-1) && c >=  17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo(c-2) && c >=  34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo(c-4) && c >=  68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
(MUL(Q|L)const [c] x) && isPowerOfTwo(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const <v.Type> [int8(log32(c-8))] x) x)
(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 <v.Type> x x))
(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 <v.Type> x x))
(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 <v.Type> x x))
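
// Worked examples of the decompositions above, reading LEAQk a b as a + k*b:
//
//	27*x = 3*x + 8*(3*x) // LEAQ8 (LEAQ2 x x) (LEAQ2 x x)
//	17*x = 16*x + x      // LEAQ1 (SHLQconst [4] x) x, via isPowerOfTwo(c-1)
//	24*x = (3*x) << 3    // SHLQconst [3] (LEAQ2 x x), via c%3 == 0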

// Prefer addition when shifting left by one
(SHL(Q|L)const [1] x) => (ADD(Q|L) x x)

// combine add/shift into LEAQ/LEAL
(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
(ADD(L|Q) x (ADD(L|Q) y y))        => (LEA(L|Q)2 x y)
(ADD(L|Q) x (ADD(L|Q) x y))        => (LEA(L|Q)2 y x)
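
// E.g. x + 4*y becomes a single LEAQ4 x y, which folds the scaled add into
// one instruction and, unlike a SHL+ADD pair, leaves both inputs intact.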

// combine ADDQ/ADDQconst into LEAQ1/LEAL1
(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
(ADD(Q|L)const [c] (ADD(Q|L) x x)) => (LEA(Q|L)1 [c] x x)

// fold ADDQ/ADDL into LEAQ/LEAL
(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)

// fold ADDQconst/ADDLconst into LEAQx/LEALx
(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)

// fold shifts into LEAQx/LEALx
(LEA(Q|L)1 [c] {s} x (ADD(Q|L) y y)) => (LEA(Q|L)2 [c] {s} x y)
(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
(LEA(Q|L)2 [c] {s} x (ADD(Q|L) y y)) => (LEA(Q|L)4 [c] {s} x y)
(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
(LEA(Q|L)4 [c] {s} x (ADD(Q|L) y y)) => (LEA(Q|L)8 [c] {s} x y)

// (x + x) << 1 -> x << 2
(LEA(Q|L)2 [0] {s} (ADD(Q|L) x x) x) && s == nil => (SHL(Q|L)const [2] x)

// (x + x) << 2 -> x << 3 and similar
(SHL(Q|L)const [c] (ADD(Q|L) x x))  => (SHL(Q|L)const [c+1] x)

// reverse ordering of compare instruction
(SETL (InvertFlags x)) => (SETG x)
(SETG (InvertFlags x)) => (SETL x)
(SETB (InvertFlags x)) => (SETA x)
(SETA (InvertFlags x)) => (SETB x)
(SETLE (InvertFlags x)) => (SETGE x)
(SETGE (InvertFlags x)) => (SETLE x)
(SETBE (InvertFlags x)) => (SETAE x)
(SETAE (InvertFlags x)) => (SETBE x)
(SETEQ (InvertFlags x)) => (SETEQ x)
(SETNE (InvertFlags x)) => (SETNE x)

(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values.  See test/atomicload.go.
(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1015  (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1016  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  1017  (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  1018  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
  1019  (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
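         // Hypothetical Go source illustrating the single-load requirement:
         //
         //    func f(p *int32) (int64, int64) {
         //        v := *p
         //        return int64(v), int64(uint32(v))
         //    }
         //
         // Both results must observe the same loaded value. If the MOVLload
         // were split into a MOVLQSXload plus a second MOVLload, a concurrent
         // writer to *p could make the two results disagree; the x.Uses == 1
         // conditions above prevent that. See test/atomicload.go.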
  1020  
  1021  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
  1022  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
  1023  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
  1024  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
  1025  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
  1026  (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
  1027  (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
  1028  (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
  1029  
  1030  // Fold extensions and ANDs together.
  1031  (MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
  1032  (MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
  1033  (MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
  1034  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
  1035  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
  1036  (MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
  1037  
  1038  // Don't extend before storing
  1039  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
  1040  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
  1041  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
  1042  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
  1043  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
  1044  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
  1045  
  1046  // fold constants into memory operations
  1047  // Note that this is not always a good idea because if not all the uses of
  1048  // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
  1049  // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
  1050  // Nevertheless, let's do it!
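         // For example (illustrative): loading a field at offset 8 of a struct,
         // (MOVQload [0] (ADDQconst [8] p) mem) becomes (MOVQload [8] p mem),
         // saving the ADDQ whenever the ADDQconst has no other uses.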
  1051  (MOV(Q|L|W|B|SS|SD|O)load  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1052      (MOV(Q|L|W|B|SS|SD|O)load  [off1+off2] {sym} ptr mem)
  1053  (MOV(Q|L|W|B|SS|SD|O)store  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1054  	(MOV(Q|L|W|B|SS|SD|O)store  [off1+off2] {sym} ptr val mem)
  1055  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1056  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
  1057  ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1058  	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
  1059  ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1060  	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
  1061  (CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1062  	(CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
  1063  (CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
  1064  	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
  1065  
  1066  ((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1067  	((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
  1068  ((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1069  	((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
  1070  ((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
  1071  	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
  1072  ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
  1073  	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
  1074  ((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1075  	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
  1076  ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1077  	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
  1078  
  1079  // Fold constants into stores.
  1080  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
  1081  	(MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
  1082  (MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1083  	(MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
  1084  (MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1085  	(MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
  1086  (MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1087  	(MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
  1088  
  1089  // Fold address offsets into constant stores.
  1090  (MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
  1091  	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
  1092  
  1093  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
  1094  // what variables are being read/written by the ops.
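         // (canMergeSym admits the merge only when at most one of the two
         // symbols is non-nil, since an address can carry at most one symbol.)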
  1095  (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1096  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1097  	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1098  (MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1099  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1100  	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1101  (MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
  1102  	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1103  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1104  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1105  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1106  ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1107  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1108  	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1109  ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1110  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1111  	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1112  (CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1113  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1114  	(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1115  (CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1116  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1117  	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1118  
  1119  ((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1120  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1121  	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1122  ((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1123  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1124  	((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1125  ((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1126  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1127  	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1128  ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1129  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1130  	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1131  ((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1132  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1133  	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1134  ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1135  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1136  	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1137  
  1138  // fold LEAQs together
  1139  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1140        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  1141  
  1142  // LEAQ into LEAQ1
  1143  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1144         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1145  
  1146  // LEAQ1 into LEAQ
  1147  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1148         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1149  
  1150  // LEAQ into LEAQ[248]
  1151  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1152         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1153  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1154         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1155  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1156         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1157  
  1158  // LEAQ[248] into LEAQ
  1159  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1160        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1161  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1162        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1163  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1164        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1165  
  1166  // LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
  1167  (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1168        (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
  1169  (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1170        (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
  1171  (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
  1172        (LEAQ4 [off1+2*off2] {sym1} x y)
  1173  (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
  1174        (LEAQ8 [off1+4*off2] {sym1} x y)
  1175  // TODO: more?
  1176  
   1177  // Lower LEAQ2/4/8 when the index is a constant
  1178  (LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
  1179  	(LEAQ [off+int32(scale)*2] {sym} x)
  1180  (LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
  1181  	(LEAQ [off+int32(scale)*4] {sym} x)
  1182  (LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
  1183  	(LEAQ [off+int32(scale)*8] {sym} x)
  1184  
  1185  // Absorb InvertFlags into branches.
  1186  (LT (InvertFlags cmp) yes no) => (GT cmp yes no)
  1187  (GT (InvertFlags cmp) yes no) => (LT cmp yes no)
  1188  (LE (InvertFlags cmp) yes no) => (GE cmp yes no)
  1189  (GE (InvertFlags cmp) yes no) => (LE cmp yes no)
  1190  (ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
  1191  (UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
  1192  (ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
  1193  (UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
  1194  (EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
  1195  (NE (InvertFlags cmp) yes no) => (NE cmp yes no)
  1196  
  1197  // Constant comparisons.
  1198  (CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
  1199  (CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
  1200  (CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
  1201  (CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
  1202  (CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
  1203  (CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
  1204  (CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
  1205  (CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
  1206  (CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
  1207  (CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
  1208  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
  1209  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
  1210  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
  1211  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
  1212  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
  1213  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
  1214  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
  1215  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
  1216  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
  1217  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
  1218  
  1219  // CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
  1220  // In theory this applies to any of the simplifications above,
  1221  // but CMPQ is the only one I've actually seen occur.
  1222  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
  1223  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
  1224  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
  1225  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
  1226  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
  1227  
  1228  // Other known comparisons.
  1229  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
  1230  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
  1231  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
  1232  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
  1233  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1234  (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1235  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1236  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
  1237  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m)  && int8(m)  < n => (FlagLT_ULT)
  1238  
  1239  // TESTQ c c sets flags like CMPQ c 0.
  1240  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
  1241  (TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
  1242  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0  => (FlagLT_UGT)
  1243  (TESTLconst [c] (MOVLconst [c])) && c < 0  => (FlagLT_UGT)
  1244  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0  => (FlagGT_UGT)
  1245  (TESTLconst [c] (MOVLconst [c])) && c > 0  => (FlagGT_UGT)
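         // Why TEST behaves like a compare against 0: TEST sets SF and ZF from
         // the AND of its operands (and c&c = c) and clears CF and OF, which is
         // exactly what CMPQ c, $0 produces (c-0 = c, no borrow, no overflow).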
  1246  
  1247  // TODO: DIVxU also.
  1248  
  1249  // Absorb flag constants into SBB ops.
  1250  (SBBQcarrymask (FlagEQ))     => (MOVQconst [0])
  1251  (SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
  1252  (SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
  1253  (SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
  1254  (SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
  1255  (SBBLcarrymask (FlagEQ))     => (MOVLconst [0])
  1256  (SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
  1257  (SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
  1258  (SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
  1259  (SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
  1260  
  1261  // Absorb flag constants into branches.
  1262  ((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no)     => (First yes no)
  1263  ((NE|LT|GT|ULT|UGT) (FlagEQ) yes no)     => (First no yes)
  1264  ((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
  1265  ((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
  1266  ((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
  1267  ((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
  1268  ((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
  1269  ((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
  1270  ((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
  1271  ((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)
  1272  
  1273  // Absorb flag constants into SETxx ops.
  1274  ((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ))     => (MOVLconst [1])
  1275  ((SETNE|SETL|SETG|SETB|SETA)     (FlagEQ))     => (MOVLconst [0])
  1276  ((SETNE|SETL|SETLE|SETB|SETBE)   (FlagLT_ULT)) => (MOVLconst [1])
  1277  ((SETEQ|SETG|SETGE|SETA|SETAE)   (FlagLT_ULT)) => (MOVLconst [0])
  1278  ((SETNE|SETL|SETLE|SETA|SETAE)   (FlagLT_UGT)) => (MOVLconst [1])
  1279  ((SETEQ|SETG|SETGE|SETB|SETBE)   (FlagLT_UGT)) => (MOVLconst [0])
  1280  ((SETNE|SETG|SETGE|SETB|SETBE)   (FlagGT_ULT)) => (MOVLconst [1])
  1281  ((SETEQ|SETL|SETLE|SETA|SETAE)   (FlagGT_ULT)) => (MOVLconst [0])
  1282  ((SETNE|SETG|SETGE|SETA|SETAE)   (FlagGT_UGT)) => (MOVLconst [1])
  1283  ((SETEQ|SETL|SETLE|SETB|SETBE)   (FlagGT_UGT)) => (MOVLconst [0])
  1284  
  1285  (SETEQstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1286  (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1287  (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1288  (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1289  (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1290  
  1291  (SETNEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1292  (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1293  (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1294  (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1295  (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1296  
  1297  (SETLstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1298  (SETLstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1299  (SETLstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1300  (SETLstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1301  (SETLstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1302  
  1303  (SETLEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1304  (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1305  (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1306  (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1307  (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1308  
  1309  (SETGstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1310  (SETGstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1311  (SETGstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1312  (SETGstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1313  (SETGstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1314  
  1315  (SETGEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1316  (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1317  (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1318  (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1319  (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1320  
  1321  (SETBstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1322  (SETBstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1323  (SETBstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1324  (SETBstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1325  (SETBstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1326  
  1327  (SETBEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1328  (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1329  (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1330  (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1331  (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1332  
  1333  (SETAstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1334  (SETAstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1335  (SETAstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1336  (SETAstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1337  (SETAstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1338  
  1339  (SETAEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1340  (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1341  (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1342  (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1343  (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1344  
  1345  // Remove redundant *const ops
  1346  (ADDQconst [0] x)          => x
  1347  (ADDLconst [c] x) && c==0  => x
  1348  (SUBQconst [0] x)          => x
  1349  (SUBLconst [c] x) && c==0  => x
  1350  (ANDQconst [0] _)          => (MOVQconst [0])
  1351  (ANDLconst [c] _) && c==0  => (MOVLconst [0])
  1352  (ANDQconst [-1] x)         => x
  1353  (ANDLconst [c] x) && c==-1 => x
  1354  (ORQconst [0] x)           => x
  1355  (ORLconst [c] x)  && c==0  => x
  1356  (ORQconst [-1] _)          => (MOVQconst [-1])
  1357  (ORLconst [c] _)  && c==-1 => (MOVLconst [-1])
  1358  (XORQconst [0] x)          => x
  1359  (XORLconst [c] x) && c==0  => x
  1360  // TODO: since we got rid of the W/B versions, we might miss
  1361  // things like (ANDLconst [0x100] x) which were formerly
  1362  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1363  // If we cared, we might do:
   1364  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1365  
  1366  // Remove redundant ops
   1367  // Not in generic rules, because they may appear after lowering, e.g. Slicemask
  1368  (NEG(Q|L) (NEG(Q|L) x)) => x
  1369  (NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
  1370  
  1371  // Convert constant subtracts to constant adds
  1372  (SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
  1373  (SUBLconst [c] x) => (ADDLconst [-c] x)
  1374  
  1375  // generic constant folding
  1376  // TODO: more of this
  1377  (ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
  1378  (ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
  1379  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
  1380  (ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
  1381  (SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
  1382  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
  1383  (SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
  1384  (SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
  1385  (SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
  1386  (SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
  1387  (NEGQ (MOVQconst [c])) => (MOVQconst [-c])
  1388  (NEGL (MOVLconst [c])) => (MOVLconst [-c])
  1389  (MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
  1390  (MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
  1391  (ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
  1392  (ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
  1393  (ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
  1394  (ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
  1395  (XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
  1396  (XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
  1397  (NOTQ (MOVQconst [c])) => (MOVQconst [^c])
  1398  (NOTL (MOVLconst [c])) => (MOVLconst [^c])
  1399  (BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
  1400  (BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
  1401  (BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
  1402  
  1403  // If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
  1404  // but we can still constant-fold.
  1405  // In theory this applies to any of the simplifications above,
  1406  // but ORQ is the only one I've actually seen occur.
  1407  (ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
  1408  
  1409  // generic simplifications
  1410  // TODO: more of this
  1411  (ADDQ x (NEGQ y)) => (SUBQ x y)
  1412  (ADDL x (NEGL y)) => (SUBL x y)
  1413  (SUBQ x x) => (MOVQconst [0])
  1414  (SUBL x x) => (MOVLconst [0])
  1415  (ANDQ x x) => x
  1416  (ANDL x x) => x
  1417  (ORQ x x)  => x
  1418  (ORL x x)  => x
  1419  (XORQ x x) => (MOVQconst [0])
  1420  (XORL x x) => (MOVLconst [0])
  1421  
  1422  (SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
  1423  (SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
  1424  (SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
  1425  
  1426  // Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
  1427  (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
  1428  (MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
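         // The guard matters because the auxint is an int32: for c = -(1<<31),
         // -c = 1<<31 is not representable, so that single case is left alone.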
  1429  
  1430  // checking AND against 0.
  1431  (CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
  1432  (CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
  1433  (CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
  1434  (CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
  1435  (CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
  1436  (CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
  1437  (CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
  1438  (CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
  1439  
  1440  // Convert TESTx to TESTxconst if possible.
  1441  (TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
  1442  (TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
  1443  (TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
  1444  (TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
  1445  
  1446  // TEST %reg,%reg is shorter than CMP
  1447  (CMPQconst x [0]) => (TESTQ x x)
  1448  (CMPLconst x [0]) => (TESTL x x)
  1449  (CMPWconst x [0]) => (TESTW x x)
  1450  (CMPBconst x [0]) => (TESTB x x)
  1451  (TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
  1452  (TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
  1453  (TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
  1454  (TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
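         // Encoding sketch for the size claim: TESTQ AX, AX encodes as 48 85 C0
         // (three bytes), while CMPQ $0, AX needs an immediate byte: 48 83 F8 00.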
  1455  
  1456  // Convert LEAQ1 back to ADDQ if we can
  1457  (LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
  1458  
  1459  (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
  1460    && x.Uses == 1
  1461    && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
  1462    && a.Val() == 0
  1463    && c.Val() == 0
  1464    && setPos(v, x.Pos)
  1465    && clobber(x)
  1466    => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
  1467  (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
  1468    && x.Uses == 1
  1469    && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
  1470    && a.Val() == 0
  1471    && c.Val() == 0
  1472    && setPos(v, x.Pos)
  1473    && clobber(x)
  1474    => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
  1475  
  1476  // Merge load and op
  1477  // TODO: add indexed variants?
  1478  ((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
  1479  ((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
  1480  ((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
  1481  ((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
  1482  (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
  1483  (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
  1484  	((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
  1485  (MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
  1486  (MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
  1487  	((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
  1488  (MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) =>
  1489  	(BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1490  
  1491  // Merge ADDQconst and LEAQ into atomic loads.
  1492  (MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1493  	(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
  1494  (MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1495  	(MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
  1496  
  1497  // Merge ADDQconst and LEAQ into atomic stores.
  1498  (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1499  	(XCHGQ [off1+off2] {sym} val ptr mem)
  1500  (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
  1501  	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  1502  (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1503  	(XCHGL [off1+off2] {sym} val ptr mem)
  1504  (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
  1505  	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  1506  
  1507  // Merge ADDQconst into atomic adds.
  1508  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  1509  (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1510  	(XADDQlock [off1+off2] {sym} val ptr mem)
  1511  (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1512  	(XADDLlock [off1+off2] {sym} val ptr mem)
  1513  
  1514  // Merge ADDQconst into atomic compare and swaps.
  1515  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  1516  (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
  1517  	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  1518  (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
  1519  	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
  1520  
  1521  // We don't need the conditional move if we know the arg of BSF is not zero.
  1522  (CMOVQEQ x _ (Select1 (BS(F|R)Q (ORQconst [c] _)))) && c != 0 => x
  1523  // Extension is unnecessary for trailing zeros.
  1524  (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
  1525  (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
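         // Background for the rules above: BSF leaves its result undefined when
         // the input is zero, so lowered TrailingZeros code either guards with a
         // CMOV or ORs in a bit just above the operand width (1<<8 for bytes,
         // 1<<16 for words) to make the input provably nonzero. Once that OR is
         // present, the bits below it are unchanged by the zero extension, so
         // the extension cannot affect which set bit BSF finds first.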
  1526  
  1527  // Redundant sign/zero extensions
  1528  // Note: see issue 21963. We have to make sure we use the right type on
  1529  // the resulting extension (the outer type, not the inner type).
  1530  (MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
  1531  (MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
  1532  (MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
  1533  (MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
  1534  (MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
  1535  (MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
  1536  (MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
  1537  (MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
  1538  (MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
  1539  (MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
  1540  (MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
  1541  (MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
  1542  
  1543  (MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
  1544  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
  1545  	((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1546  (MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  1547  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
  1548  	((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1549  
  1550  // float <-> int register moves, with no conversion.
  1551  // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
  1552  (MOVQload  [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
  1553  (MOVLload  [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
  1554  (MOVSDload [off] {sym} ptr (MOVQstore  [off] {sym} ptr val _)) => (MOVQi2f val)
  1555  (MOVSSload [off] {sym} ptr (MOVLstore  [off] {sym} ptr val _)) => (MOVLi2f val)
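         // Illustrative source for this pattern (a sketch):
         //
         //    func bits(f float64) uint64 { return math.Float64bits(f) }
         //
         // Float64bits reinterprets its argument through memory, producing the
         // store/load pair matched above; the rules replace the round trip with
         // a direct move between the register files.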
  1556  
  1557  // Other load-like ops.
  1558  (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
  1559  (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
  1560  (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
  1561  (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
  1562  (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
  1563  (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
  1564  ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
  1565  ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
  1566  (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
  1567  (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
  1568  
  1569  (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
  1570  (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
  1571  (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
  1572  (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
  1573  (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
  1574  (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
  1575  
  1576  // Detect FMA
  1577  (ADDS(S|D) (MULS(S|D) x y) z) && buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v) => (VFMADD231S(S|D) z x y)
  1578  
  1579  // Redirect stores to use the other register set.
  1580  (MOVQstore  [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
  1581  (MOVLstore  [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
  1582  (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore  [off] {sym} ptr val mem)
  1583  (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore  [off] {sym} ptr val mem)
  1584  
  1585  (MOVSDstore [off] {sym} ptr (MOVSDconst [f]) mem) && f == f => (MOVQstore [off] {sym} ptr (MOVQconst [int64(math.Float64bits(f))]) mem)
  1586  (MOVSSstore [off] {sym} ptr (MOVSSconst [f]) mem) && f == f => (MOVLstore [off] {sym} ptr (MOVLconst [int32(math.Float32bits(f))]) mem)
  1587  
  1588  // Load args directly into the register class where it will be used.
  1589  // We do this by just modifying the type of the Arg.
  1590  (MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1591  (MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1592  (MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1593  (MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  1594  
  1595  // LEAQ is rematerializeable, so this helps to avoid register spill.
  1596  // See issue 22947 for details
  1597  (ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
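         // (A rematerializeable value can be recomputed at each use instead of
         // being spilled; LEAQ off(SP) qualifies because SP is always available,
         // whereas an ADDQconst result would have to be kept live or spilled.)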
  1598  
  1599  // HMULx is commutative, but its first argument must go in AX.
  1600  // If possible, put a rematerializeable value in the first argument slot,
   1601  // to reduce the odds that another value will have to be spilled
  1602  // specifically to free up AX.
  1603  (HMUL(Q|L)  x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)  y x)
  1604  (HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
  1605  
  1606  // Fold loads into compares
  1607  // Note: these may be undone by the flagalloc pass.
  1608  (CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
  1609  (CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
  1610  
  1611  (CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
  1612  	&& l.Uses == 1
  1613  	&& clobber(l) =>
  1614  @l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
  1615  (CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
  1616  	&& l.Uses == 1
  1617  	&& clobber(l) =>
  1618  @l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1619  
  1620  (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
  1621  (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
  1622  (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
  1623  (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
  1624  
  1625  (TEST(Q|L|W|B)  l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
  1626          && l == l2
  1627  	&& l.Uses == 2
  1628  	&& clobber(l) =>
  1629    @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)
  1630  
  1631  // Convert ANDload to MOVload when we can do the AND in a containing TEST op.
  1632  // Only do when it's within the same block, so we don't have flags live across basic block boundaries.
  1633  // See issue 44228.
  1634  (TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x)
  1635  
  1636  (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
  1637  (MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  1638  (MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  1639  (MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  1640  (MOVBQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int8(read8(sym, int64(off))))])
  1641  (MOVWQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
  1642  (MOVLQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
  1643  
  1644  
  1645  (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
  1646    (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
  1647      (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
  1648  
  1649  // Arch-specific inlining for small or disjoint runtime.memmove
  1650  // Match post-lowering calls, memory version.
  1651  (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
  1652  	&& sc.Val64() >= 0
  1653  	&& isSameCall(sym, "runtime.memmove")
  1654  	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1655  	&& isInlinableMemmove(dst, src, sc.Val64(), config)
  1656  	&& clobber(s1, s2, s3, call)
  1657  	=> (Move [sc.Val64()] dst src mem)
  1658  
  1659  // Match post-lowering calls, register version.
  1660  (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
  1661  	&& sz >= 0
  1662  	&& isSameCall(sym, "runtime.memmove")
  1663  	&& call.Uses == 1
  1664  	&& isInlinableMemmove(dst, src, sz, config)
  1665  	&& clobber(call)
  1666  	=> (Move [sz] dst src mem)
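         // Illustrative trigger (hedged): a small constant-size copy such as
         //
         //    func cp8(dst, src []byte) { copy(dst[:8], src[:8]) }
         //
         // lowers to a runtime.memmove call with a constant length, which the
         // rules above replace with a plain 8-byte Move.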
  1667  
  1668  // Prefetch instructions
  1669  (PrefetchCache ...)   => (PrefetchT0 ...)
  1670  (PrefetchCacheStreamed ...) => (PrefetchNTA ...)
  1671  
  1672  // CPUID feature: BMI1.
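         // The four rules below are the classic bit tricks: x &^ y -> ANDN,
         // x & -x (isolate lowest set bit) -> BLSI, x ^ (x-1) (mask through the
         // lowest set bit) -> BLSMSK, and x & (x-1) (clear lowest set bit) -> BLSR.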
  1673  (AND(Q|L) x (NOT(Q|L) y))               && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y)
  1674  (AND(Q|L) x (NEG(Q|L) x))               && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x)
  1675  (XOR(Q|L) x (ADD(Q|L)const [-1] x))     && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x)
  1676  (AND(Q|L) <t> x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (Select0 <t> (BLSR(Q|L) x))
  1677  // eliminate TEST instruction in classical "isPowerOfTwo" check
  1678  (SETEQ       (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (SETEQ       (Select1 <types.TypeFlags> blsr))
  1679  (CMOVQEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
  1680  (CMOVLEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
  1681  (EQ          (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (EQ          (Select1 <types.TypeFlags> blsr) yes no)
  1682  (SETNE       (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (SETNE       (Select1 <types.TypeFlags> blsr))
  1683  (CMOVQNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
  1684  (CMOVLNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s))        => (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
  1685  (NE          (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (NE          (Select1 <types.TypeFlags> blsr) yes no)
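         // Illustrative source for the TEST elimination above (a sketch):
         //
         //    func isPow2(x uint64) bool { return x&(x-1) == 0 }
         //
         // x&(x-1) lowers to BLSR, which already sets ZF from its result, so the
         // separate TESTQ of that result is redundant.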
  1686  
  1687  (BSWAP(Q|L) (BSWAP(Q|L) p)) => p
  1688  
  1689  // CPUID feature: MOVBE.
  1690  (MOV(Q|L)store   [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
  1691  (MOVBE(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1                          => (MOV(Q|L)store   [i] {s} p w mem)
  1692  (BSWAP(Q|L) x:(MOV(Q|L)load   [i] {s} p mem))  && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => @x.Block (MOVBE(Q|L)load [i] {s} p mem)
  1693  (BSWAP(Q|L) x:(MOVBE(Q|L)load [i] {s} p mem))  && x.Uses == 1                          => @x.Block (MOV(Q|L)load   [i] {s} p mem)
  1694  (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)   && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBEWstore [i] {s} p w mem)
  1695  (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 => (MOVWstore [i] {s} p w mem)
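         // Illustrative trigger (hedged): a big-endian store such as
         // binary.BigEndian.PutUint64(b, v) is combined into a byte swap plus a
         // plain store, which the rules above fuse into a single MOVBEQstore
         // when GOAMD64 >= v3.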
  1696  
  1697  (SAR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
  1698  (SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
  1699  (SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
  1700  
  1701  ((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
  1702  ((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
  1703  ((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
  1704  
  1705  // Convert atomic logical operations to easier ones if we don't use the result.
  1706  (Select1 a:(LoweredAtomic(And64|And32|Or64|Or32) ptr val mem)) && a.Uses == 1 && clobber(a) => ((ANDQ|ANDL|ORQ|ORL)lock ptr val mem)
  1707  
