1
2
3 package ssa
4
5 import "internal/buildcfg"
6 import "math"
7 import "cmd/internal/obj"
8 import "cmd/compile/internal/types"
9
10 func rewriteValueAMD64(v *Value) bool {
11 switch v.Op {
12 case OpAMD64ADCQ:
13 return rewriteValueAMD64_OpAMD64ADCQ(v)
14 case OpAMD64ADCQconst:
15 return rewriteValueAMD64_OpAMD64ADCQconst(v)
16 case OpAMD64ADDL:
17 return rewriteValueAMD64_OpAMD64ADDL(v)
18 case OpAMD64ADDLconst:
19 return rewriteValueAMD64_OpAMD64ADDLconst(v)
20 case OpAMD64ADDLconstmodify:
21 return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
22 case OpAMD64ADDLload:
23 return rewriteValueAMD64_OpAMD64ADDLload(v)
24 case OpAMD64ADDLmodify:
25 return rewriteValueAMD64_OpAMD64ADDLmodify(v)
26 case OpAMD64ADDQ:
27 return rewriteValueAMD64_OpAMD64ADDQ(v)
28 case OpAMD64ADDQcarry:
29 return rewriteValueAMD64_OpAMD64ADDQcarry(v)
30 case OpAMD64ADDQconst:
31 return rewriteValueAMD64_OpAMD64ADDQconst(v)
32 case OpAMD64ADDQconstmodify:
33 return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
34 case OpAMD64ADDQload:
35 return rewriteValueAMD64_OpAMD64ADDQload(v)
36 case OpAMD64ADDQmodify:
37 return rewriteValueAMD64_OpAMD64ADDQmodify(v)
38 case OpAMD64ADDSD:
39 return rewriteValueAMD64_OpAMD64ADDSD(v)
40 case OpAMD64ADDSDload:
41 return rewriteValueAMD64_OpAMD64ADDSDload(v)
42 case OpAMD64ADDSS:
43 return rewriteValueAMD64_OpAMD64ADDSS(v)
44 case OpAMD64ADDSSload:
45 return rewriteValueAMD64_OpAMD64ADDSSload(v)
46 case OpAMD64ANDL:
47 return rewriteValueAMD64_OpAMD64ANDL(v)
48 case OpAMD64ANDLconst:
49 return rewriteValueAMD64_OpAMD64ANDLconst(v)
50 case OpAMD64ANDLconstmodify:
51 return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
52 case OpAMD64ANDLload:
53 return rewriteValueAMD64_OpAMD64ANDLload(v)
54 case OpAMD64ANDLmodify:
55 return rewriteValueAMD64_OpAMD64ANDLmodify(v)
56 case OpAMD64ANDNL:
57 return rewriteValueAMD64_OpAMD64ANDNL(v)
58 case OpAMD64ANDNQ:
59 return rewriteValueAMD64_OpAMD64ANDNQ(v)
60 case OpAMD64ANDQ:
61 return rewriteValueAMD64_OpAMD64ANDQ(v)
62 case OpAMD64ANDQconst:
63 return rewriteValueAMD64_OpAMD64ANDQconst(v)
64 case OpAMD64ANDQconstmodify:
65 return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
66 case OpAMD64ANDQload:
67 return rewriteValueAMD64_OpAMD64ANDQload(v)
68 case OpAMD64ANDQmodify:
69 return rewriteValueAMD64_OpAMD64ANDQmodify(v)
70 case OpAMD64BSFQ:
71 return rewriteValueAMD64_OpAMD64BSFQ(v)
72 case OpAMD64BSWAPL:
73 return rewriteValueAMD64_OpAMD64BSWAPL(v)
74 case OpAMD64BSWAPQ:
75 return rewriteValueAMD64_OpAMD64BSWAPQ(v)
76 case OpAMD64BTCQconst:
77 return rewriteValueAMD64_OpAMD64BTCQconst(v)
78 case OpAMD64BTLconst:
79 return rewriteValueAMD64_OpAMD64BTLconst(v)
80 case OpAMD64BTQconst:
81 return rewriteValueAMD64_OpAMD64BTQconst(v)
82 case OpAMD64BTRQconst:
83 return rewriteValueAMD64_OpAMD64BTRQconst(v)
84 case OpAMD64BTSQconst:
85 return rewriteValueAMD64_OpAMD64BTSQconst(v)
86 case OpAMD64CMOVLCC:
87 return rewriteValueAMD64_OpAMD64CMOVLCC(v)
88 case OpAMD64CMOVLCS:
89 return rewriteValueAMD64_OpAMD64CMOVLCS(v)
90 case OpAMD64CMOVLEQ:
91 return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
92 case OpAMD64CMOVLGE:
93 return rewriteValueAMD64_OpAMD64CMOVLGE(v)
94 case OpAMD64CMOVLGT:
95 return rewriteValueAMD64_OpAMD64CMOVLGT(v)
96 case OpAMD64CMOVLHI:
97 return rewriteValueAMD64_OpAMD64CMOVLHI(v)
98 case OpAMD64CMOVLLE:
99 return rewriteValueAMD64_OpAMD64CMOVLLE(v)
100 case OpAMD64CMOVLLS:
101 return rewriteValueAMD64_OpAMD64CMOVLLS(v)
102 case OpAMD64CMOVLLT:
103 return rewriteValueAMD64_OpAMD64CMOVLLT(v)
104 case OpAMD64CMOVLNE:
105 return rewriteValueAMD64_OpAMD64CMOVLNE(v)
106 case OpAMD64CMOVQCC:
107 return rewriteValueAMD64_OpAMD64CMOVQCC(v)
108 case OpAMD64CMOVQCS:
109 return rewriteValueAMD64_OpAMD64CMOVQCS(v)
110 case OpAMD64CMOVQEQ:
111 return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
112 case OpAMD64CMOVQGE:
113 return rewriteValueAMD64_OpAMD64CMOVQGE(v)
114 case OpAMD64CMOVQGT:
115 return rewriteValueAMD64_OpAMD64CMOVQGT(v)
116 case OpAMD64CMOVQHI:
117 return rewriteValueAMD64_OpAMD64CMOVQHI(v)
118 case OpAMD64CMOVQLE:
119 return rewriteValueAMD64_OpAMD64CMOVQLE(v)
120 case OpAMD64CMOVQLS:
121 return rewriteValueAMD64_OpAMD64CMOVQLS(v)
122 case OpAMD64CMOVQLT:
123 return rewriteValueAMD64_OpAMD64CMOVQLT(v)
124 case OpAMD64CMOVQNE:
125 return rewriteValueAMD64_OpAMD64CMOVQNE(v)
126 case OpAMD64CMOVWCC:
127 return rewriteValueAMD64_OpAMD64CMOVWCC(v)
128 case OpAMD64CMOVWCS:
129 return rewriteValueAMD64_OpAMD64CMOVWCS(v)
130 case OpAMD64CMOVWEQ:
131 return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
132 case OpAMD64CMOVWGE:
133 return rewriteValueAMD64_OpAMD64CMOVWGE(v)
134 case OpAMD64CMOVWGT:
135 return rewriteValueAMD64_OpAMD64CMOVWGT(v)
136 case OpAMD64CMOVWHI:
137 return rewriteValueAMD64_OpAMD64CMOVWHI(v)
138 case OpAMD64CMOVWLE:
139 return rewriteValueAMD64_OpAMD64CMOVWLE(v)
140 case OpAMD64CMOVWLS:
141 return rewriteValueAMD64_OpAMD64CMOVWLS(v)
142 case OpAMD64CMOVWLT:
143 return rewriteValueAMD64_OpAMD64CMOVWLT(v)
144 case OpAMD64CMOVWNE:
145 return rewriteValueAMD64_OpAMD64CMOVWNE(v)
146 case OpAMD64CMPB:
147 return rewriteValueAMD64_OpAMD64CMPB(v)
148 case OpAMD64CMPBconst:
149 return rewriteValueAMD64_OpAMD64CMPBconst(v)
150 case OpAMD64CMPBconstload:
151 return rewriteValueAMD64_OpAMD64CMPBconstload(v)
152 case OpAMD64CMPBload:
153 return rewriteValueAMD64_OpAMD64CMPBload(v)
154 case OpAMD64CMPL:
155 return rewriteValueAMD64_OpAMD64CMPL(v)
156 case OpAMD64CMPLconst:
157 return rewriteValueAMD64_OpAMD64CMPLconst(v)
158 case OpAMD64CMPLconstload:
159 return rewriteValueAMD64_OpAMD64CMPLconstload(v)
160 case OpAMD64CMPLload:
161 return rewriteValueAMD64_OpAMD64CMPLload(v)
162 case OpAMD64CMPQ:
163 return rewriteValueAMD64_OpAMD64CMPQ(v)
164 case OpAMD64CMPQconst:
165 return rewriteValueAMD64_OpAMD64CMPQconst(v)
166 case OpAMD64CMPQconstload:
167 return rewriteValueAMD64_OpAMD64CMPQconstload(v)
168 case OpAMD64CMPQload:
169 return rewriteValueAMD64_OpAMD64CMPQload(v)
170 case OpAMD64CMPW:
171 return rewriteValueAMD64_OpAMD64CMPW(v)
172 case OpAMD64CMPWconst:
173 return rewriteValueAMD64_OpAMD64CMPWconst(v)
174 case OpAMD64CMPWconstload:
175 return rewriteValueAMD64_OpAMD64CMPWconstload(v)
176 case OpAMD64CMPWload:
177 return rewriteValueAMD64_OpAMD64CMPWload(v)
178 case OpAMD64CMPXCHGLlock:
179 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
180 case OpAMD64CMPXCHGQlock:
181 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
182 case OpAMD64DIVSD:
183 return rewriteValueAMD64_OpAMD64DIVSD(v)
184 case OpAMD64DIVSDload:
185 return rewriteValueAMD64_OpAMD64DIVSDload(v)
186 case OpAMD64DIVSS:
187 return rewriteValueAMD64_OpAMD64DIVSS(v)
188 case OpAMD64DIVSSload:
189 return rewriteValueAMD64_OpAMD64DIVSSload(v)
190 case OpAMD64HMULL:
191 return rewriteValueAMD64_OpAMD64HMULL(v)
192 case OpAMD64HMULLU:
193 return rewriteValueAMD64_OpAMD64HMULLU(v)
194 case OpAMD64HMULQ:
195 return rewriteValueAMD64_OpAMD64HMULQ(v)
196 case OpAMD64HMULQU:
197 return rewriteValueAMD64_OpAMD64HMULQU(v)
198 case OpAMD64LEAL:
199 return rewriteValueAMD64_OpAMD64LEAL(v)
200 case OpAMD64LEAL1:
201 return rewriteValueAMD64_OpAMD64LEAL1(v)
202 case OpAMD64LEAL2:
203 return rewriteValueAMD64_OpAMD64LEAL2(v)
204 case OpAMD64LEAL4:
205 return rewriteValueAMD64_OpAMD64LEAL4(v)
206 case OpAMD64LEAL8:
207 return rewriteValueAMD64_OpAMD64LEAL8(v)
208 case OpAMD64LEAQ:
209 return rewriteValueAMD64_OpAMD64LEAQ(v)
210 case OpAMD64LEAQ1:
211 return rewriteValueAMD64_OpAMD64LEAQ1(v)
212 case OpAMD64LEAQ2:
213 return rewriteValueAMD64_OpAMD64LEAQ2(v)
214 case OpAMD64LEAQ4:
215 return rewriteValueAMD64_OpAMD64LEAQ4(v)
216 case OpAMD64LEAQ8:
217 return rewriteValueAMD64_OpAMD64LEAQ8(v)
218 case OpAMD64MOVBELstore:
219 return rewriteValueAMD64_OpAMD64MOVBELstore(v)
220 case OpAMD64MOVBEQstore:
221 return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
222 case OpAMD64MOVBEWstore:
223 return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
224 case OpAMD64MOVBQSX:
225 return rewriteValueAMD64_OpAMD64MOVBQSX(v)
226 case OpAMD64MOVBQSXload:
227 return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
228 case OpAMD64MOVBQZX:
229 return rewriteValueAMD64_OpAMD64MOVBQZX(v)
230 case OpAMD64MOVBatomicload:
231 return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
232 case OpAMD64MOVBload:
233 return rewriteValueAMD64_OpAMD64MOVBload(v)
234 case OpAMD64MOVBstore:
235 return rewriteValueAMD64_OpAMD64MOVBstore(v)
236 case OpAMD64MOVBstoreconst:
237 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
238 case OpAMD64MOVLQSX:
239 return rewriteValueAMD64_OpAMD64MOVLQSX(v)
240 case OpAMD64MOVLQSXload:
241 return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
242 case OpAMD64MOVLQZX:
243 return rewriteValueAMD64_OpAMD64MOVLQZX(v)
244 case OpAMD64MOVLatomicload:
245 return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
246 case OpAMD64MOVLf2i:
247 return rewriteValueAMD64_OpAMD64MOVLf2i(v)
248 case OpAMD64MOVLi2f:
249 return rewriteValueAMD64_OpAMD64MOVLi2f(v)
250 case OpAMD64MOVLload:
251 return rewriteValueAMD64_OpAMD64MOVLload(v)
252 case OpAMD64MOVLstore:
253 return rewriteValueAMD64_OpAMD64MOVLstore(v)
254 case OpAMD64MOVLstoreconst:
255 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
256 case OpAMD64MOVOload:
257 return rewriteValueAMD64_OpAMD64MOVOload(v)
258 case OpAMD64MOVOstore:
259 return rewriteValueAMD64_OpAMD64MOVOstore(v)
260 case OpAMD64MOVOstoreconst:
261 return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
262 case OpAMD64MOVQatomicload:
263 return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
264 case OpAMD64MOVQf2i:
265 return rewriteValueAMD64_OpAMD64MOVQf2i(v)
266 case OpAMD64MOVQi2f:
267 return rewriteValueAMD64_OpAMD64MOVQi2f(v)
268 case OpAMD64MOVQload:
269 return rewriteValueAMD64_OpAMD64MOVQload(v)
270 case OpAMD64MOVQstore:
271 return rewriteValueAMD64_OpAMD64MOVQstore(v)
272 case OpAMD64MOVQstoreconst:
273 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
274 case OpAMD64MOVSDload:
275 return rewriteValueAMD64_OpAMD64MOVSDload(v)
276 case OpAMD64MOVSDstore:
277 return rewriteValueAMD64_OpAMD64MOVSDstore(v)
278 case OpAMD64MOVSSload:
279 return rewriteValueAMD64_OpAMD64MOVSSload(v)
280 case OpAMD64MOVSSstore:
281 return rewriteValueAMD64_OpAMD64MOVSSstore(v)
282 case OpAMD64MOVWQSX:
283 return rewriteValueAMD64_OpAMD64MOVWQSX(v)
284 case OpAMD64MOVWQSXload:
285 return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
286 case OpAMD64MOVWQZX:
287 return rewriteValueAMD64_OpAMD64MOVWQZX(v)
288 case OpAMD64MOVWload:
289 return rewriteValueAMD64_OpAMD64MOVWload(v)
290 case OpAMD64MOVWstore:
291 return rewriteValueAMD64_OpAMD64MOVWstore(v)
292 case OpAMD64MOVWstoreconst:
293 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
294 case OpAMD64MULL:
295 return rewriteValueAMD64_OpAMD64MULL(v)
296 case OpAMD64MULLconst:
297 return rewriteValueAMD64_OpAMD64MULLconst(v)
298 case OpAMD64MULQ:
299 return rewriteValueAMD64_OpAMD64MULQ(v)
300 case OpAMD64MULQconst:
301 return rewriteValueAMD64_OpAMD64MULQconst(v)
302 case OpAMD64MULSD:
303 return rewriteValueAMD64_OpAMD64MULSD(v)
304 case OpAMD64MULSDload:
305 return rewriteValueAMD64_OpAMD64MULSDload(v)
306 case OpAMD64MULSS:
307 return rewriteValueAMD64_OpAMD64MULSS(v)
308 case OpAMD64MULSSload:
309 return rewriteValueAMD64_OpAMD64MULSSload(v)
310 case OpAMD64NEGL:
311 return rewriteValueAMD64_OpAMD64NEGL(v)
312 case OpAMD64NEGQ:
313 return rewriteValueAMD64_OpAMD64NEGQ(v)
314 case OpAMD64NOTL:
315 return rewriteValueAMD64_OpAMD64NOTL(v)
316 case OpAMD64NOTQ:
317 return rewriteValueAMD64_OpAMD64NOTQ(v)
318 case OpAMD64ORL:
319 return rewriteValueAMD64_OpAMD64ORL(v)
320 case OpAMD64ORLconst:
321 return rewriteValueAMD64_OpAMD64ORLconst(v)
322 case OpAMD64ORLconstmodify:
323 return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
324 case OpAMD64ORLload:
325 return rewriteValueAMD64_OpAMD64ORLload(v)
326 case OpAMD64ORLmodify:
327 return rewriteValueAMD64_OpAMD64ORLmodify(v)
328 case OpAMD64ORQ:
329 return rewriteValueAMD64_OpAMD64ORQ(v)
330 case OpAMD64ORQconst:
331 return rewriteValueAMD64_OpAMD64ORQconst(v)
332 case OpAMD64ORQconstmodify:
333 return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
334 case OpAMD64ORQload:
335 return rewriteValueAMD64_OpAMD64ORQload(v)
336 case OpAMD64ORQmodify:
337 return rewriteValueAMD64_OpAMD64ORQmodify(v)
338 case OpAMD64ROLB:
339 return rewriteValueAMD64_OpAMD64ROLB(v)
340 case OpAMD64ROLBconst:
341 return rewriteValueAMD64_OpAMD64ROLBconst(v)
342 case OpAMD64ROLL:
343 return rewriteValueAMD64_OpAMD64ROLL(v)
344 case OpAMD64ROLLconst:
345 return rewriteValueAMD64_OpAMD64ROLLconst(v)
346 case OpAMD64ROLQ:
347 return rewriteValueAMD64_OpAMD64ROLQ(v)
348 case OpAMD64ROLQconst:
349 return rewriteValueAMD64_OpAMD64ROLQconst(v)
350 case OpAMD64ROLW:
351 return rewriteValueAMD64_OpAMD64ROLW(v)
352 case OpAMD64ROLWconst:
353 return rewriteValueAMD64_OpAMD64ROLWconst(v)
354 case OpAMD64RORB:
355 return rewriteValueAMD64_OpAMD64RORB(v)
356 case OpAMD64RORL:
357 return rewriteValueAMD64_OpAMD64RORL(v)
358 case OpAMD64RORQ:
359 return rewriteValueAMD64_OpAMD64RORQ(v)
360 case OpAMD64RORW:
361 return rewriteValueAMD64_OpAMD64RORW(v)
362 case OpAMD64SARB:
363 return rewriteValueAMD64_OpAMD64SARB(v)
364 case OpAMD64SARBconst:
365 return rewriteValueAMD64_OpAMD64SARBconst(v)
366 case OpAMD64SARL:
367 return rewriteValueAMD64_OpAMD64SARL(v)
368 case OpAMD64SARLconst:
369 return rewriteValueAMD64_OpAMD64SARLconst(v)
370 case OpAMD64SARQ:
371 return rewriteValueAMD64_OpAMD64SARQ(v)
372 case OpAMD64SARQconst:
373 return rewriteValueAMD64_OpAMD64SARQconst(v)
374 case OpAMD64SARW:
375 return rewriteValueAMD64_OpAMD64SARW(v)
376 case OpAMD64SARWconst:
377 return rewriteValueAMD64_OpAMD64SARWconst(v)
378 case OpAMD64SARXLload:
379 return rewriteValueAMD64_OpAMD64SARXLload(v)
380 case OpAMD64SARXQload:
381 return rewriteValueAMD64_OpAMD64SARXQload(v)
382 case OpAMD64SBBLcarrymask:
383 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
384 case OpAMD64SBBQ:
385 return rewriteValueAMD64_OpAMD64SBBQ(v)
386 case OpAMD64SBBQcarrymask:
387 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
388 case OpAMD64SBBQconst:
389 return rewriteValueAMD64_OpAMD64SBBQconst(v)
390 case OpAMD64SETA:
391 return rewriteValueAMD64_OpAMD64SETA(v)
392 case OpAMD64SETAE:
393 return rewriteValueAMD64_OpAMD64SETAE(v)
394 case OpAMD64SETAEstore:
395 return rewriteValueAMD64_OpAMD64SETAEstore(v)
396 case OpAMD64SETAstore:
397 return rewriteValueAMD64_OpAMD64SETAstore(v)
398 case OpAMD64SETB:
399 return rewriteValueAMD64_OpAMD64SETB(v)
400 case OpAMD64SETBE:
401 return rewriteValueAMD64_OpAMD64SETBE(v)
402 case OpAMD64SETBEstore:
403 return rewriteValueAMD64_OpAMD64SETBEstore(v)
404 case OpAMD64SETBstore:
405 return rewriteValueAMD64_OpAMD64SETBstore(v)
406 case OpAMD64SETEQ:
407 return rewriteValueAMD64_OpAMD64SETEQ(v)
408 case OpAMD64SETEQstore:
409 return rewriteValueAMD64_OpAMD64SETEQstore(v)
410 case OpAMD64SETG:
411 return rewriteValueAMD64_OpAMD64SETG(v)
412 case OpAMD64SETGE:
413 return rewriteValueAMD64_OpAMD64SETGE(v)
414 case OpAMD64SETGEstore:
415 return rewriteValueAMD64_OpAMD64SETGEstore(v)
416 case OpAMD64SETGstore:
417 return rewriteValueAMD64_OpAMD64SETGstore(v)
418 case OpAMD64SETL:
419 return rewriteValueAMD64_OpAMD64SETL(v)
420 case OpAMD64SETLE:
421 return rewriteValueAMD64_OpAMD64SETLE(v)
422 case OpAMD64SETLEstore:
423 return rewriteValueAMD64_OpAMD64SETLEstore(v)
424 case OpAMD64SETLstore:
425 return rewriteValueAMD64_OpAMD64SETLstore(v)
426 case OpAMD64SETNE:
427 return rewriteValueAMD64_OpAMD64SETNE(v)
428 case OpAMD64SETNEstore:
429 return rewriteValueAMD64_OpAMD64SETNEstore(v)
430 case OpAMD64SHLL:
431 return rewriteValueAMD64_OpAMD64SHLL(v)
432 case OpAMD64SHLLconst:
433 return rewriteValueAMD64_OpAMD64SHLLconst(v)
434 case OpAMD64SHLQ:
435 return rewriteValueAMD64_OpAMD64SHLQ(v)
436 case OpAMD64SHLQconst:
437 return rewriteValueAMD64_OpAMD64SHLQconst(v)
438 case OpAMD64SHLXLload:
439 return rewriteValueAMD64_OpAMD64SHLXLload(v)
440 case OpAMD64SHLXQload:
441 return rewriteValueAMD64_OpAMD64SHLXQload(v)
442 case OpAMD64SHRB:
443 return rewriteValueAMD64_OpAMD64SHRB(v)
444 case OpAMD64SHRBconst:
445 return rewriteValueAMD64_OpAMD64SHRBconst(v)
446 case OpAMD64SHRL:
447 return rewriteValueAMD64_OpAMD64SHRL(v)
448 case OpAMD64SHRLconst:
449 return rewriteValueAMD64_OpAMD64SHRLconst(v)
450 case OpAMD64SHRQ:
451 return rewriteValueAMD64_OpAMD64SHRQ(v)
452 case OpAMD64SHRQconst:
453 return rewriteValueAMD64_OpAMD64SHRQconst(v)
454 case OpAMD64SHRW:
455 return rewriteValueAMD64_OpAMD64SHRW(v)
456 case OpAMD64SHRWconst:
457 return rewriteValueAMD64_OpAMD64SHRWconst(v)
458 case OpAMD64SHRXLload:
459 return rewriteValueAMD64_OpAMD64SHRXLload(v)
460 case OpAMD64SHRXQload:
461 return rewriteValueAMD64_OpAMD64SHRXQload(v)
462 case OpAMD64SUBL:
463 return rewriteValueAMD64_OpAMD64SUBL(v)
464 case OpAMD64SUBLconst:
465 return rewriteValueAMD64_OpAMD64SUBLconst(v)
466 case OpAMD64SUBLload:
467 return rewriteValueAMD64_OpAMD64SUBLload(v)
468 case OpAMD64SUBLmodify:
469 return rewriteValueAMD64_OpAMD64SUBLmodify(v)
470 case OpAMD64SUBQ:
471 return rewriteValueAMD64_OpAMD64SUBQ(v)
472 case OpAMD64SUBQborrow:
473 return rewriteValueAMD64_OpAMD64SUBQborrow(v)
474 case OpAMD64SUBQconst:
475 return rewriteValueAMD64_OpAMD64SUBQconst(v)
476 case OpAMD64SUBQload:
477 return rewriteValueAMD64_OpAMD64SUBQload(v)
478 case OpAMD64SUBQmodify:
479 return rewriteValueAMD64_OpAMD64SUBQmodify(v)
480 case OpAMD64SUBSD:
481 return rewriteValueAMD64_OpAMD64SUBSD(v)
482 case OpAMD64SUBSDload:
483 return rewriteValueAMD64_OpAMD64SUBSDload(v)
484 case OpAMD64SUBSS:
485 return rewriteValueAMD64_OpAMD64SUBSS(v)
486 case OpAMD64SUBSSload:
487 return rewriteValueAMD64_OpAMD64SUBSSload(v)
488 case OpAMD64TESTB:
489 return rewriteValueAMD64_OpAMD64TESTB(v)
490 case OpAMD64TESTBconst:
491 return rewriteValueAMD64_OpAMD64TESTBconst(v)
492 case OpAMD64TESTL:
493 return rewriteValueAMD64_OpAMD64TESTL(v)
494 case OpAMD64TESTLconst:
495 return rewriteValueAMD64_OpAMD64TESTLconst(v)
496 case OpAMD64TESTQ:
497 return rewriteValueAMD64_OpAMD64TESTQ(v)
498 case OpAMD64TESTQconst:
499 return rewriteValueAMD64_OpAMD64TESTQconst(v)
500 case OpAMD64TESTW:
501 return rewriteValueAMD64_OpAMD64TESTW(v)
502 case OpAMD64TESTWconst:
503 return rewriteValueAMD64_OpAMD64TESTWconst(v)
504 case OpAMD64XADDLlock:
505 return rewriteValueAMD64_OpAMD64XADDLlock(v)
506 case OpAMD64XADDQlock:
507 return rewriteValueAMD64_OpAMD64XADDQlock(v)
508 case OpAMD64XCHGL:
509 return rewriteValueAMD64_OpAMD64XCHGL(v)
510 case OpAMD64XCHGQ:
511 return rewriteValueAMD64_OpAMD64XCHGQ(v)
512 case OpAMD64XORL:
513 return rewriteValueAMD64_OpAMD64XORL(v)
514 case OpAMD64XORLconst:
515 return rewriteValueAMD64_OpAMD64XORLconst(v)
516 case OpAMD64XORLconstmodify:
517 return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
518 case OpAMD64XORLload:
519 return rewriteValueAMD64_OpAMD64XORLload(v)
520 case OpAMD64XORLmodify:
521 return rewriteValueAMD64_OpAMD64XORLmodify(v)
522 case OpAMD64XORQ:
523 return rewriteValueAMD64_OpAMD64XORQ(v)
524 case OpAMD64XORQconst:
525 return rewriteValueAMD64_OpAMD64XORQconst(v)
526 case OpAMD64XORQconstmodify:
527 return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
528 case OpAMD64XORQload:
529 return rewriteValueAMD64_OpAMD64XORQload(v)
530 case OpAMD64XORQmodify:
531 return rewriteValueAMD64_OpAMD64XORQmodify(v)
532 case OpAdd16:
533 v.Op = OpAMD64ADDL
534 return true
535 case OpAdd32:
536 v.Op = OpAMD64ADDL
537 return true
538 case OpAdd32F:
539 v.Op = OpAMD64ADDSS
540 return true
541 case OpAdd64:
542 v.Op = OpAMD64ADDQ
543 return true
544 case OpAdd64F:
545 v.Op = OpAMD64ADDSD
546 return true
547 case OpAdd8:
548 v.Op = OpAMD64ADDL
549 return true
550 case OpAddPtr:
551 v.Op = OpAMD64ADDQ
552 return true
553 case OpAddr:
554 return rewriteValueAMD64_OpAddr(v)
555 case OpAnd16:
556 v.Op = OpAMD64ANDL
557 return true
558 case OpAnd32:
559 v.Op = OpAMD64ANDL
560 return true
561 case OpAnd64:
562 v.Op = OpAMD64ANDQ
563 return true
564 case OpAnd8:
565 v.Op = OpAMD64ANDL
566 return true
567 case OpAndB:
568 v.Op = OpAMD64ANDL
569 return true
570 case OpAtomicAdd32:
571 return rewriteValueAMD64_OpAtomicAdd32(v)
572 case OpAtomicAdd64:
573 return rewriteValueAMD64_OpAtomicAdd64(v)
574 case OpAtomicAnd32:
575 return rewriteValueAMD64_OpAtomicAnd32(v)
576 case OpAtomicAnd32value:
577 return rewriteValueAMD64_OpAtomicAnd32value(v)
578 case OpAtomicAnd64value:
579 return rewriteValueAMD64_OpAtomicAnd64value(v)
580 case OpAtomicAnd8:
581 return rewriteValueAMD64_OpAtomicAnd8(v)
582 case OpAtomicCompareAndSwap32:
583 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
584 case OpAtomicCompareAndSwap64:
585 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
586 case OpAtomicExchange32:
587 return rewriteValueAMD64_OpAtomicExchange32(v)
588 case OpAtomicExchange64:
589 return rewriteValueAMD64_OpAtomicExchange64(v)
590 case OpAtomicExchange8:
591 return rewriteValueAMD64_OpAtomicExchange8(v)
592 case OpAtomicLoad32:
593 return rewriteValueAMD64_OpAtomicLoad32(v)
594 case OpAtomicLoad64:
595 return rewriteValueAMD64_OpAtomicLoad64(v)
596 case OpAtomicLoad8:
597 return rewriteValueAMD64_OpAtomicLoad8(v)
598 case OpAtomicLoadPtr:
599 return rewriteValueAMD64_OpAtomicLoadPtr(v)
600 case OpAtomicOr32:
601 return rewriteValueAMD64_OpAtomicOr32(v)
602 case OpAtomicOr32value:
603 return rewriteValueAMD64_OpAtomicOr32value(v)
604 case OpAtomicOr64value:
605 return rewriteValueAMD64_OpAtomicOr64value(v)
606 case OpAtomicOr8:
607 return rewriteValueAMD64_OpAtomicOr8(v)
608 case OpAtomicStore32:
609 return rewriteValueAMD64_OpAtomicStore32(v)
610 case OpAtomicStore64:
611 return rewriteValueAMD64_OpAtomicStore64(v)
612 case OpAtomicStore8:
613 return rewriteValueAMD64_OpAtomicStore8(v)
614 case OpAtomicStorePtrNoWB:
615 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
616 case OpAvg64u:
617 v.Op = OpAMD64AVGQU
618 return true
619 case OpBitLen16:
620 return rewriteValueAMD64_OpBitLen16(v)
621 case OpBitLen32:
622 return rewriteValueAMD64_OpBitLen32(v)
623 case OpBitLen64:
624 return rewriteValueAMD64_OpBitLen64(v)
625 case OpBitLen8:
626 return rewriteValueAMD64_OpBitLen8(v)
627 case OpBswap16:
628 return rewriteValueAMD64_OpBswap16(v)
629 case OpBswap32:
630 v.Op = OpAMD64BSWAPL
631 return true
632 case OpBswap64:
633 v.Op = OpAMD64BSWAPQ
634 return true
635 case OpCeil:
636 return rewriteValueAMD64_OpCeil(v)
637 case OpClosureCall:
638 v.Op = OpAMD64CALLclosure
639 return true
640 case OpCom16:
641 v.Op = OpAMD64NOTL
642 return true
643 case OpCom32:
644 v.Op = OpAMD64NOTL
645 return true
646 case OpCom64:
647 v.Op = OpAMD64NOTQ
648 return true
649 case OpCom8:
650 v.Op = OpAMD64NOTL
651 return true
652 case OpCondSelect:
653 return rewriteValueAMD64_OpCondSelect(v)
654 case OpConst16:
655 return rewriteValueAMD64_OpConst16(v)
656 case OpConst32:
657 v.Op = OpAMD64MOVLconst
658 return true
659 case OpConst32F:
660 v.Op = OpAMD64MOVSSconst
661 return true
662 case OpConst64:
663 v.Op = OpAMD64MOVQconst
664 return true
665 case OpConst64F:
666 v.Op = OpAMD64MOVSDconst
667 return true
668 case OpConst8:
669 return rewriteValueAMD64_OpConst8(v)
670 case OpConstBool:
671 return rewriteValueAMD64_OpConstBool(v)
672 case OpConstNil:
673 return rewriteValueAMD64_OpConstNil(v)
674 case OpCtz16:
675 return rewriteValueAMD64_OpCtz16(v)
676 case OpCtz16NonZero:
677 return rewriteValueAMD64_OpCtz16NonZero(v)
678 case OpCtz32:
679 return rewriteValueAMD64_OpCtz32(v)
680 case OpCtz32NonZero:
681 return rewriteValueAMD64_OpCtz32NonZero(v)
682 case OpCtz64:
683 return rewriteValueAMD64_OpCtz64(v)
684 case OpCtz64NonZero:
685 return rewriteValueAMD64_OpCtz64NonZero(v)
686 case OpCtz8:
687 return rewriteValueAMD64_OpCtz8(v)
688 case OpCtz8NonZero:
689 return rewriteValueAMD64_OpCtz8NonZero(v)
690 case OpCvt32Fto32:
691 v.Op = OpAMD64CVTTSS2SL
692 return true
693 case OpCvt32Fto64:
694 v.Op = OpAMD64CVTTSS2SQ
695 return true
696 case OpCvt32Fto64F:
697 v.Op = OpAMD64CVTSS2SD
698 return true
699 case OpCvt32to32F:
700 v.Op = OpAMD64CVTSL2SS
701 return true
702 case OpCvt32to64F:
703 v.Op = OpAMD64CVTSL2SD
704 return true
705 case OpCvt64Fto32:
706 v.Op = OpAMD64CVTTSD2SL
707 return true
708 case OpCvt64Fto32F:
709 v.Op = OpAMD64CVTSD2SS
710 return true
711 case OpCvt64Fto64:
712 v.Op = OpAMD64CVTTSD2SQ
713 return true
714 case OpCvt64to32F:
715 v.Op = OpAMD64CVTSQ2SS
716 return true
717 case OpCvt64to64F:
718 v.Op = OpAMD64CVTSQ2SD
719 return true
720 case OpCvtBoolToUint8:
721 v.Op = OpCopy
722 return true
723 case OpDiv128u:
724 v.Op = OpAMD64DIVQU2
725 return true
726 case OpDiv16:
727 return rewriteValueAMD64_OpDiv16(v)
728 case OpDiv16u:
729 return rewriteValueAMD64_OpDiv16u(v)
730 case OpDiv32:
731 return rewriteValueAMD64_OpDiv32(v)
732 case OpDiv32F:
733 v.Op = OpAMD64DIVSS
734 return true
735 case OpDiv32u:
736 return rewriteValueAMD64_OpDiv32u(v)
737 case OpDiv64:
738 return rewriteValueAMD64_OpDiv64(v)
739 case OpDiv64F:
740 v.Op = OpAMD64DIVSD
741 return true
742 case OpDiv64u:
743 return rewriteValueAMD64_OpDiv64u(v)
744 case OpDiv8:
745 return rewriteValueAMD64_OpDiv8(v)
746 case OpDiv8u:
747 return rewriteValueAMD64_OpDiv8u(v)
748 case OpEq16:
749 return rewriteValueAMD64_OpEq16(v)
750 case OpEq32:
751 return rewriteValueAMD64_OpEq32(v)
752 case OpEq32F:
753 return rewriteValueAMD64_OpEq32F(v)
754 case OpEq64:
755 return rewriteValueAMD64_OpEq64(v)
756 case OpEq64F:
757 return rewriteValueAMD64_OpEq64F(v)
758 case OpEq8:
759 return rewriteValueAMD64_OpEq8(v)
760 case OpEqB:
761 return rewriteValueAMD64_OpEqB(v)
762 case OpEqPtr:
763 return rewriteValueAMD64_OpEqPtr(v)
764 case OpFMA:
765 return rewriteValueAMD64_OpFMA(v)
766 case OpFloor:
767 return rewriteValueAMD64_OpFloor(v)
768 case OpGetCallerPC:
769 v.Op = OpAMD64LoweredGetCallerPC
770 return true
771 case OpGetCallerSP:
772 v.Op = OpAMD64LoweredGetCallerSP
773 return true
774 case OpGetClosurePtr:
775 v.Op = OpAMD64LoweredGetClosurePtr
776 return true
777 case OpGetG:
778 return rewriteValueAMD64_OpGetG(v)
779 case OpHasCPUFeature:
780 return rewriteValueAMD64_OpHasCPUFeature(v)
781 case OpHmul32:
782 v.Op = OpAMD64HMULL
783 return true
784 case OpHmul32u:
785 v.Op = OpAMD64HMULLU
786 return true
787 case OpHmul64:
788 v.Op = OpAMD64HMULQ
789 return true
790 case OpHmul64u:
791 v.Op = OpAMD64HMULQU
792 return true
793 case OpInterCall:
794 v.Op = OpAMD64CALLinter
795 return true
796 case OpIsInBounds:
797 return rewriteValueAMD64_OpIsInBounds(v)
798 case OpIsNonNil:
799 return rewriteValueAMD64_OpIsNonNil(v)
800 case OpIsSliceInBounds:
801 return rewriteValueAMD64_OpIsSliceInBounds(v)
802 case OpLeq16:
803 return rewriteValueAMD64_OpLeq16(v)
804 case OpLeq16U:
805 return rewriteValueAMD64_OpLeq16U(v)
806 case OpLeq32:
807 return rewriteValueAMD64_OpLeq32(v)
808 case OpLeq32F:
809 return rewriteValueAMD64_OpLeq32F(v)
810 case OpLeq32U:
811 return rewriteValueAMD64_OpLeq32U(v)
812 case OpLeq64:
813 return rewriteValueAMD64_OpLeq64(v)
814 case OpLeq64F:
815 return rewriteValueAMD64_OpLeq64F(v)
816 case OpLeq64U:
817 return rewriteValueAMD64_OpLeq64U(v)
818 case OpLeq8:
819 return rewriteValueAMD64_OpLeq8(v)
820 case OpLeq8U:
821 return rewriteValueAMD64_OpLeq8U(v)
822 case OpLess16:
823 return rewriteValueAMD64_OpLess16(v)
824 case OpLess16U:
825 return rewriteValueAMD64_OpLess16U(v)
826 case OpLess32:
827 return rewriteValueAMD64_OpLess32(v)
828 case OpLess32F:
829 return rewriteValueAMD64_OpLess32F(v)
830 case OpLess32U:
831 return rewriteValueAMD64_OpLess32U(v)
832 case OpLess64:
833 return rewriteValueAMD64_OpLess64(v)
834 case OpLess64F:
835 return rewriteValueAMD64_OpLess64F(v)
836 case OpLess64U:
837 return rewriteValueAMD64_OpLess64U(v)
838 case OpLess8:
839 return rewriteValueAMD64_OpLess8(v)
840 case OpLess8U:
841 return rewriteValueAMD64_OpLess8U(v)
842 case OpLoad:
843 return rewriteValueAMD64_OpLoad(v)
844 case OpLocalAddr:
845 return rewriteValueAMD64_OpLocalAddr(v)
846 case OpLsh16x16:
847 return rewriteValueAMD64_OpLsh16x16(v)
848 case OpLsh16x32:
849 return rewriteValueAMD64_OpLsh16x32(v)
850 case OpLsh16x64:
851 return rewriteValueAMD64_OpLsh16x64(v)
852 case OpLsh16x8:
853 return rewriteValueAMD64_OpLsh16x8(v)
854 case OpLsh32x16:
855 return rewriteValueAMD64_OpLsh32x16(v)
856 case OpLsh32x32:
857 return rewriteValueAMD64_OpLsh32x32(v)
858 case OpLsh32x64:
859 return rewriteValueAMD64_OpLsh32x64(v)
860 case OpLsh32x8:
861 return rewriteValueAMD64_OpLsh32x8(v)
862 case OpLsh64x16:
863 return rewriteValueAMD64_OpLsh64x16(v)
864 case OpLsh64x32:
865 return rewriteValueAMD64_OpLsh64x32(v)
866 case OpLsh64x64:
867 return rewriteValueAMD64_OpLsh64x64(v)
868 case OpLsh64x8:
869 return rewriteValueAMD64_OpLsh64x8(v)
870 case OpLsh8x16:
871 return rewriteValueAMD64_OpLsh8x16(v)
872 case OpLsh8x32:
873 return rewriteValueAMD64_OpLsh8x32(v)
874 case OpLsh8x64:
875 return rewriteValueAMD64_OpLsh8x64(v)
876 case OpLsh8x8:
877 return rewriteValueAMD64_OpLsh8x8(v)
878 case OpMax32F:
879 return rewriteValueAMD64_OpMax32F(v)
880 case OpMax64F:
881 return rewriteValueAMD64_OpMax64F(v)
882 case OpMin32F:
883 return rewriteValueAMD64_OpMin32F(v)
884 case OpMin64F:
885 return rewriteValueAMD64_OpMin64F(v)
886 case OpMod16:
887 return rewriteValueAMD64_OpMod16(v)
888 case OpMod16u:
889 return rewriteValueAMD64_OpMod16u(v)
890 case OpMod32:
891 return rewriteValueAMD64_OpMod32(v)
892 case OpMod32u:
893 return rewriteValueAMD64_OpMod32u(v)
894 case OpMod64:
895 return rewriteValueAMD64_OpMod64(v)
896 case OpMod64u:
897 return rewriteValueAMD64_OpMod64u(v)
898 case OpMod8:
899 return rewriteValueAMD64_OpMod8(v)
900 case OpMod8u:
901 return rewriteValueAMD64_OpMod8u(v)
902 case OpMove:
903 return rewriteValueAMD64_OpMove(v)
904 case OpMul16:
905 v.Op = OpAMD64MULL
906 return true
907 case OpMul32:
908 v.Op = OpAMD64MULL
909 return true
910 case OpMul32F:
911 v.Op = OpAMD64MULSS
912 return true
913 case OpMul64:
914 v.Op = OpAMD64MULQ
915 return true
916 case OpMul64F:
917 v.Op = OpAMD64MULSD
918 return true
919 case OpMul64uhilo:
920 v.Op = OpAMD64MULQU2
921 return true
922 case OpMul8:
923 v.Op = OpAMD64MULL
924 return true
925 case OpNeg16:
926 v.Op = OpAMD64NEGL
927 return true
928 case OpNeg32:
929 v.Op = OpAMD64NEGL
930 return true
931 case OpNeg32F:
932 return rewriteValueAMD64_OpNeg32F(v)
933 case OpNeg64:
934 v.Op = OpAMD64NEGQ
935 return true
936 case OpNeg64F:
937 return rewriteValueAMD64_OpNeg64F(v)
938 case OpNeg8:
939 v.Op = OpAMD64NEGL
940 return true
941 case OpNeq16:
942 return rewriteValueAMD64_OpNeq16(v)
943 case OpNeq32:
944 return rewriteValueAMD64_OpNeq32(v)
945 case OpNeq32F:
946 return rewriteValueAMD64_OpNeq32F(v)
947 case OpNeq64:
948 return rewriteValueAMD64_OpNeq64(v)
949 case OpNeq64F:
950 return rewriteValueAMD64_OpNeq64F(v)
951 case OpNeq8:
952 return rewriteValueAMD64_OpNeq8(v)
953 case OpNeqB:
954 return rewriteValueAMD64_OpNeqB(v)
955 case OpNeqPtr:
956 return rewriteValueAMD64_OpNeqPtr(v)
957 case OpNilCheck:
958 v.Op = OpAMD64LoweredNilCheck
959 return true
960 case OpNot:
961 return rewriteValueAMD64_OpNot(v)
962 case OpOffPtr:
963 return rewriteValueAMD64_OpOffPtr(v)
964 case OpOr16:
965 v.Op = OpAMD64ORL
966 return true
967 case OpOr32:
968 v.Op = OpAMD64ORL
969 return true
970 case OpOr64:
971 v.Op = OpAMD64ORQ
972 return true
973 case OpOr8:
974 v.Op = OpAMD64ORL
975 return true
976 case OpOrB:
977 v.Op = OpAMD64ORL
978 return true
979 case OpPanicBounds:
980 return rewriteValueAMD64_OpPanicBounds(v)
981 case OpPopCount16:
982 return rewriteValueAMD64_OpPopCount16(v)
983 case OpPopCount32:
984 v.Op = OpAMD64POPCNTL
985 return true
986 case OpPopCount64:
987 v.Op = OpAMD64POPCNTQ
988 return true
989 case OpPopCount8:
990 return rewriteValueAMD64_OpPopCount8(v)
991 case OpPrefetchCache:
992 v.Op = OpAMD64PrefetchT0
993 return true
994 case OpPrefetchCacheStreamed:
995 v.Op = OpAMD64PrefetchNTA
996 return true
997 case OpRotateLeft16:
998 v.Op = OpAMD64ROLW
999 return true
1000 case OpRotateLeft32:
1001 v.Op = OpAMD64ROLL
1002 return true
1003 case OpRotateLeft64:
1004 v.Op = OpAMD64ROLQ
1005 return true
1006 case OpRotateLeft8:
1007 v.Op = OpAMD64ROLB
1008 return true
1009 case OpRound32F:
1010 v.Op = OpAMD64LoweredRound32F
1011 return true
1012 case OpRound64F:
1013 v.Op = OpAMD64LoweredRound64F
1014 return true
1015 case OpRoundToEven:
1016 return rewriteValueAMD64_OpRoundToEven(v)
1017 case OpRsh16Ux16:
1018 return rewriteValueAMD64_OpRsh16Ux16(v)
1019 case OpRsh16Ux32:
1020 return rewriteValueAMD64_OpRsh16Ux32(v)
1021 case OpRsh16Ux64:
1022 return rewriteValueAMD64_OpRsh16Ux64(v)
1023 case OpRsh16Ux8:
1024 return rewriteValueAMD64_OpRsh16Ux8(v)
1025 case OpRsh16x16:
1026 return rewriteValueAMD64_OpRsh16x16(v)
1027 case OpRsh16x32:
1028 return rewriteValueAMD64_OpRsh16x32(v)
1029 case OpRsh16x64:
1030 return rewriteValueAMD64_OpRsh16x64(v)
1031 case OpRsh16x8:
1032 return rewriteValueAMD64_OpRsh16x8(v)
1033 case OpRsh32Ux16:
1034 return rewriteValueAMD64_OpRsh32Ux16(v)
1035 case OpRsh32Ux32:
1036 return rewriteValueAMD64_OpRsh32Ux32(v)
1037 case OpRsh32Ux64:
1038 return rewriteValueAMD64_OpRsh32Ux64(v)
1039 case OpRsh32Ux8:
1040 return rewriteValueAMD64_OpRsh32Ux8(v)
1041 case OpRsh32x16:
1042 return rewriteValueAMD64_OpRsh32x16(v)
1043 case OpRsh32x32:
1044 return rewriteValueAMD64_OpRsh32x32(v)
1045 case OpRsh32x64:
1046 return rewriteValueAMD64_OpRsh32x64(v)
1047 case OpRsh32x8:
1048 return rewriteValueAMD64_OpRsh32x8(v)
1049 case OpRsh64Ux16:
1050 return rewriteValueAMD64_OpRsh64Ux16(v)
1051 case OpRsh64Ux32:
1052 return rewriteValueAMD64_OpRsh64Ux32(v)
1053 case OpRsh64Ux64:
1054 return rewriteValueAMD64_OpRsh64Ux64(v)
1055 case OpRsh64Ux8:
1056 return rewriteValueAMD64_OpRsh64Ux8(v)
1057 case OpRsh64x16:
1058 return rewriteValueAMD64_OpRsh64x16(v)
1059 case OpRsh64x32:
1060 return rewriteValueAMD64_OpRsh64x32(v)
1061 case OpRsh64x64:
1062 return rewriteValueAMD64_OpRsh64x64(v)
1063 case OpRsh64x8:
1064 return rewriteValueAMD64_OpRsh64x8(v)
1065 case OpRsh8Ux16:
1066 return rewriteValueAMD64_OpRsh8Ux16(v)
1067 case OpRsh8Ux32:
1068 return rewriteValueAMD64_OpRsh8Ux32(v)
1069 case OpRsh8Ux64:
1070 return rewriteValueAMD64_OpRsh8Ux64(v)
1071 case OpRsh8Ux8:
1072 return rewriteValueAMD64_OpRsh8Ux8(v)
1073 case OpRsh8x16:
1074 return rewriteValueAMD64_OpRsh8x16(v)
1075 case OpRsh8x32:
1076 return rewriteValueAMD64_OpRsh8x32(v)
1077 case OpRsh8x64:
1078 return rewriteValueAMD64_OpRsh8x64(v)
1079 case OpRsh8x8:
1080 return rewriteValueAMD64_OpRsh8x8(v)
1081 case OpSelect0:
1082 return rewriteValueAMD64_OpSelect0(v)
1083 case OpSelect1:
1084 return rewriteValueAMD64_OpSelect1(v)
1085 case OpSelectN:
1086 return rewriteValueAMD64_OpSelectN(v)
1087 case OpSignExt16to32:
1088 v.Op = OpAMD64MOVWQSX
1089 return true
1090 case OpSignExt16to64:
1091 v.Op = OpAMD64MOVWQSX
1092 return true
1093 case OpSignExt32to64:
1094 v.Op = OpAMD64MOVLQSX
1095 return true
1096 case OpSignExt8to16:
1097 v.Op = OpAMD64MOVBQSX
1098 return true
1099 case OpSignExt8to32:
1100 v.Op = OpAMD64MOVBQSX
1101 return true
1102 case OpSignExt8to64:
1103 v.Op = OpAMD64MOVBQSX
1104 return true
1105 case OpSlicemask:
1106 return rewriteValueAMD64_OpSlicemask(v)
1107 case OpSpectreIndex:
1108 return rewriteValueAMD64_OpSpectreIndex(v)
1109 case OpSpectreSliceIndex:
1110 return rewriteValueAMD64_OpSpectreSliceIndex(v)
1111 case OpSqrt:
1112 v.Op = OpAMD64SQRTSD
1113 return true
1114 case OpSqrt32:
1115 v.Op = OpAMD64SQRTSS
1116 return true
1117 case OpStaticCall:
1118 v.Op = OpAMD64CALLstatic
1119 return true
1120 case OpStore:
1121 return rewriteValueAMD64_OpStore(v)
1122 case OpSub16:
1123 v.Op = OpAMD64SUBL
1124 return true
1125 case OpSub32:
1126 v.Op = OpAMD64SUBL
1127 return true
1128 case OpSub32F:
1129 v.Op = OpAMD64SUBSS
1130 return true
1131 case OpSub64:
1132 v.Op = OpAMD64SUBQ
1133 return true
1134 case OpSub64F:
1135 v.Op = OpAMD64SUBSD
1136 return true
1137 case OpSub8:
1138 v.Op = OpAMD64SUBL
1139 return true
1140 case OpSubPtr:
1141 v.Op = OpAMD64SUBQ
1142 return true
1143 case OpTailCall:
1144 v.Op = OpAMD64CALLtail
1145 return true
1146 case OpTrunc:
1147 return rewriteValueAMD64_OpTrunc(v)
1148 case OpTrunc16to8:
1149 v.Op = OpCopy
1150 return true
1151 case OpTrunc32to16:
1152 v.Op = OpCopy
1153 return true
1154 case OpTrunc32to8:
1155 v.Op = OpCopy
1156 return true
1157 case OpTrunc64to16:
1158 v.Op = OpCopy
1159 return true
1160 case OpTrunc64to32:
1161 v.Op = OpCopy
1162 return true
1163 case OpTrunc64to8:
1164 v.Op = OpCopy
1165 return true
1166 case OpWB:
1167 v.Op = OpAMD64LoweredWB
1168 return true
1169 case OpXor16:
1170 v.Op = OpAMD64XORL
1171 return true
1172 case OpXor32:
1173 v.Op = OpAMD64XORL
1174 return true
1175 case OpXor64:
1176 v.Op = OpAMD64XORQ
1177 return true
1178 case OpXor8:
1179 v.Op = OpAMD64XORL
1180 return true
1181 case OpZero:
1182 return rewriteValueAMD64_OpZero(v)
1183 case OpZeroExt16to32:
1184 v.Op = OpAMD64MOVWQZX
1185 return true
1186 case OpZeroExt16to64:
1187 v.Op = OpAMD64MOVWQZX
1188 return true
1189 case OpZeroExt32to64:
1190 v.Op = OpAMD64MOVLQZX
1191 return true
1192 case OpZeroExt8to16:
1193 v.Op = OpAMD64MOVBQZX
1194 return true
1195 case OpZeroExt8to32:
1196 v.Op = OpAMD64MOVBQZX
1197 return true
1198 case OpZeroExt8to64:
1199 v.Op = OpAMD64MOVBQZX
1200 return true
1201 }
1202 return false
1203 }
// rewriteValueAMD64_OpAMD64ADCQ applies the ADCQ rewrite rules to v and
// reports whether any rule fired. Rules are tried in order; first match wins.
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	// The inner loop tries both argument orders (ADCQ's first two args commute).
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	// A known-clear carry flag makes the add-with-carry a plain carrying add.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
1243 func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
1244 v_1 := v.Args[1]
1245 v_0 := v.Args[0]
1246
1247
1248 for {
1249 c := auxIntToInt32(v.AuxInt)
1250 x := v_0
1251 if v_1.Op != OpAMD64FlagEQ {
1252 break
1253 }
1254 v.reset(OpAMD64ADDQconstcarry)
1255 v.AuxInt = int32ToAuxInt(c)
1256 v.AddArg(x)
1257 return true
1258 }
1259 return false
1260 }
// rewriteValueAMD64_OpAMD64ADDL applies the ADDL rewrite rules to v and
// reports whether any rule fired. Rules are tried in order; first match wins.
// Inner _i0/_i1 loops try both argument orders of commutative operands.
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL (SHRLconst [1] x) (SHRLconst [1] x))
	// result: (ANDLconst [-2] x)
	for {
		if v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 1 || x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLconst applies the ADDLconst rewrite rules to v
// and reports whether any rule fired. Rules are tried in order.
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (ADDL x x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
1637 func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
1638 v_1 := v.Args[1]
1639 v_0 := v.Args[0]
1640
1641
1642
1643 for {
1644 valoff1 := auxIntToValAndOff(v.AuxInt)
1645 sym := auxToSym(v.Aux)
1646 if v_0.Op != OpAMD64ADDQconst {
1647 break
1648 }
1649 off2 := auxIntToInt32(v_0.AuxInt)
1650 base := v_0.Args[0]
1651 mem := v_1
1652 if !(ValAndOff(valoff1).canAdd32(off2)) {
1653 break
1654 }
1655 v.reset(OpAMD64ADDLconstmodify)
1656 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1657 v.Aux = symToAux(sym)
1658 v.AddArg2(base, mem)
1659 return true
1660 }
1661
1662
1663
1664 for {
1665 valoff1 := auxIntToValAndOff(v.AuxInt)
1666 sym1 := auxToSym(v.Aux)
1667 if v_0.Op != OpAMD64LEAQ {
1668 break
1669 }
1670 off2 := auxIntToInt32(v_0.AuxInt)
1671 sym2 := auxToSym(v_0.Aux)
1672 base := v_0.Args[0]
1673 mem := v_1
1674 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
1675 break
1676 }
1677 v.reset(OpAMD64ADDLconstmodify)
1678 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
1679 v.Aux = symToAux(mergeSym(sym1, sym2))
1680 v.AddArg2(base, mem)
1681 return true
1682 }
1683 return false
1684 }
// rewriteValueAMD64_OpAMD64ADDLload applies the ADDLload rewrite rules to v
// and reports whether any rule fired.
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1, sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	// The load would read back the value just stored; use the stored value
	// directly, moved from the FP register to an integer register.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
1758 func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
1759 v_2 := v.Args[2]
1760 v_1 := v.Args[1]
1761 v_0 := v.Args[0]
1762
1763
1764
1765 for {
1766 off1 := auxIntToInt32(v.AuxInt)
1767 sym := auxToSym(v.Aux)
1768 if v_0.Op != OpAMD64ADDQconst {
1769 break
1770 }
1771 off2 := auxIntToInt32(v_0.AuxInt)
1772 base := v_0.Args[0]
1773 val := v_1
1774 mem := v_2
1775 if !(is32Bit(int64(off1) + int64(off2))) {
1776 break
1777 }
1778 v.reset(OpAMD64ADDLmodify)
1779 v.AuxInt = int32ToAuxInt(off1 + off2)
1780 v.Aux = symToAux(sym)
1781 v.AddArg3(base, val, mem)
1782 return true
1783 }
1784
1785
1786
1787 for {
1788 off1 := auxIntToInt32(v.AuxInt)
1789 sym1 := auxToSym(v.Aux)
1790 if v_0.Op != OpAMD64LEAQ {
1791 break
1792 }
1793 off2 := auxIntToInt32(v_0.AuxInt)
1794 sym2 := auxToSym(v_0.Aux)
1795 base := v_0.Args[0]
1796 val := v_1
1797 mem := v_2
1798 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
1799 break
1800 }
1801 v.reset(OpAMD64ADDLmodify)
1802 v.AuxInt = int32ToAuxInt(off1 + off2)
1803 v.Aux = symToAux(mergeSym(sym1, sym2))
1804 v.AddArg3(base, val, mem)
1805 return true
1806 }
1807 return false
1808 }
// rewriteValueAMD64_OpAMD64ADDQ applies the ADDQ rewrite rules to v and
// reports whether any rule fired. Rules are tried in order; first match wins.
// Inner _i0/_i1 loops try both argument orders of commutative operands.
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ (SHRQconst [1] x) (SHRQconst [1] x))
	// result: (ANDQconst [-2] x)
	for {
		if v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 1 || x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (MOVQconst <t> [c]))
	// cond: is32Bit(c) && !t.IsPtr()
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			t := v_1.Type
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c) && !t.IsPtr()) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
2017 func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
2018 v_1 := v.Args[1]
2019 v_0 := v.Args[0]
2020
2021
2022
2023 for {
2024 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2025 x := v_0
2026 if v_1.Op != OpAMD64MOVQconst {
2027 continue
2028 }
2029 c := auxIntToInt64(v_1.AuxInt)
2030 if !(is32Bit(c)) {
2031 continue
2032 }
2033 v.reset(OpAMD64ADDQconstcarry)
2034 v.AuxInt = int32ToAuxInt(int32(c))
2035 v.AddArg(x)
2036 return true
2037 }
2038 break
2039 }
2040 return false
2041 }
// rewriteValueAMD64_OpAMD64ADDQconst applies the ADDQconst rewrite rules to v
// and reports whether any rule fired. Rules are tried in order.
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (ADDQ x x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
2233 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2234 v_1 := v.Args[1]
2235 v_0 := v.Args[0]
2236
2237
2238
2239 for {
2240 valoff1 := auxIntToValAndOff(v.AuxInt)
2241 sym := auxToSym(v.Aux)
2242 if v_0.Op != OpAMD64ADDQconst {
2243 break
2244 }
2245 off2 := auxIntToInt32(v_0.AuxInt)
2246 base := v_0.Args[0]
2247 mem := v_1
2248 if !(ValAndOff(valoff1).canAdd32(off2)) {
2249 break
2250 }
2251 v.reset(OpAMD64ADDQconstmodify)
2252 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2253 v.Aux = symToAux(sym)
2254 v.AddArg2(base, mem)
2255 return true
2256 }
2257
2258
2259
2260 for {
2261 valoff1 := auxIntToValAndOff(v.AuxInt)
2262 sym1 := auxToSym(v.Aux)
2263 if v_0.Op != OpAMD64LEAQ {
2264 break
2265 }
2266 off2 := auxIntToInt32(v_0.AuxInt)
2267 sym2 := auxToSym(v_0.Aux)
2268 base := v_0.Args[0]
2269 mem := v_1
2270 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2271 break
2272 }
2273 v.reset(OpAMD64ADDQconstmodify)
2274 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2275 v.Aux = symToAux(mergeSym(sym1, sym2))
2276 v.AddArg2(base, mem)
2277 return true
2278 }
2279 return false
2280 }
// rewriteValueAMD64_OpAMD64ADDQload applies the ADDQload rewrite rules to v
// and reports whether any rule fired.
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1, sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	// The load would read back the value just stored; use the stored value
	// directly, moved from the FP register to an integer register.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
2354 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2355 v_2 := v.Args[2]
2356 v_1 := v.Args[1]
2357 v_0 := v.Args[0]
2358
2359
2360
2361 for {
2362 off1 := auxIntToInt32(v.AuxInt)
2363 sym := auxToSym(v.Aux)
2364 if v_0.Op != OpAMD64ADDQconst {
2365 break
2366 }
2367 off2 := auxIntToInt32(v_0.AuxInt)
2368 base := v_0.Args[0]
2369 val := v_1
2370 mem := v_2
2371 if !(is32Bit(int64(off1) + int64(off2))) {
2372 break
2373 }
2374 v.reset(OpAMD64ADDQmodify)
2375 v.AuxInt = int32ToAuxInt(off1 + off2)
2376 v.Aux = symToAux(sym)
2377 v.AddArg3(base, val, mem)
2378 return true
2379 }
2380
2381
2382
2383 for {
2384 off1 := auxIntToInt32(v.AuxInt)
2385 sym1 := auxToSym(v.Aux)
2386 if v_0.Op != OpAMD64LEAQ {
2387 break
2388 }
2389 off2 := auxIntToInt32(v_0.AuxInt)
2390 sym2 := auxToSym(v_0.Aux)
2391 base := v_0.Args[0]
2392 val := v_1
2393 mem := v_2
2394 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2395 break
2396 }
2397 v.reset(OpAMD64ADDQmodify)
2398 v.AuxInt = int32ToAuxInt(off1 + off2)
2399 v.Aux = symToAux(mergeSym(sym1, sym2))
2400 v.AddArg3(base, val, mem)
2401 return true
2402 }
2403 return false
2404 }
// rewriteValueAMD64_OpAMD64ADDSD applies the ADDSD rewrite rules to v and
// reports whether any rule fired. Inner _i0 loops try both argument orders
// (ADDSD is commutative).
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ADDSD (MULSD x y) z)
	// cond: buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)
	// result: (VFMADD231SD z x y)
	// Fuse multiply-add when the target supports FMA and the function allows it.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MULSD {
				continue
			}
			y := v_0.Args[1]
			x := v_0.Args[0]
			z := v_1
			if !(buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)) {
				continue
			}
			v.reset(OpAMD64VFMADD231SD)
			v.AddArg3(z, x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSDload applies the ADDSDload rewrite rules to v
// and reports whether any rule fired.
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1, sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	// The load would read back the value just stored; use the stored value
	// directly, moved from the integer register to an FP register.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSS applies the AMD64.rules rewrites for ADDSS
// (single-precision float add) values; it reports whether v was rewritten.
// The inner _i0 loops try both operand orders of the commutative add.
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ADDSS (MULSS x y) z)
	// cond: buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)
	// result: (VFMADD231SS z x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MULSS {
				continue
			}
			y := v_0.Args[1]
			x := v_0.Args[0]
			z := v_1
			if !(buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)) {
				continue
			}
			v.reset(OpAMD64VFMADD231SS)
			v.AddArg3(z, x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSSload applies the AMD64.rules rewrites for
// ADDSSload values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDL applies the AMD64.rules rewrites for ANDL
// (32-bit bitwise AND) values; it reports whether v was rewritten. The _i0
// loops try both operand orders of the commutative AND.
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL <t> x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRL x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconst applies the AMD64.rules rewrites for
// ANDLconst (32-bit AND with immediate) values; it reports whether v was
// rewritten.
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconstmodify applies the AMD64.rules rewrites
// for ANDLconstmodify (read-modify-write AND of a constant into memory)
// values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLload applies the AMD64.rules rewrites for
// ANDLload values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLmodify applies the AMD64.rules rewrites for
// ANDLmodify (read-modify-write AND into memory) values; it reports whether
// v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDNL applies the AMD64.rules rewrites for ANDNL
// (BMI and-not, 32-bit) values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDNQ applies the AMD64.rules rewrites for ANDNQ
// (BMI and-not, 64-bit) values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQ applies the AMD64.rules rewrites for ANDQ
// (64-bit bitwise AND) values; it reports whether v was rewritten. The _i0
// loops try both operand orders of the commutative AND.
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ <t> x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRQ x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconst applies the AMD64.rules rewrites for
// ANDQconst (64-bit AND with 32-bit immediate) values; it reports whether v
// was rewritten.
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconstmodify applies the AMD64.rules rewrites
// for ANDQconstmodify (read-modify-write AND of a constant into memory)
// values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQload applies the AMD64.rules rewrites for
// ANDQload values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQmodify applies the AMD64.rules rewrites for
// ANDQmodify (read-modify-write AND into memory) values; it reports whether
// v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSFQ applies the AMD64.rules rewrites for BSFQ
// (bit-scan-forward) values; it reports whether v was rewritten. The rules
// drop a redundant zero-extension under the sentinel-bit OR.
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSWAPL applies the AMD64.rules rewrites for
// BSWAPL (32-bit byte swap) values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPL (BSWAPL p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVLload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSWAPQ applies the AMD64.rules rewrites for
// BSWAPQ (64-bit byte swap) values; it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPQ (BSWAPQ p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCQconst applies the AMD64.rules rewrites for
// BTCQconst (bit-test-and-complement with constant index) values; it reports
// whether v was rewritten.
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTLconst applies the AMD64.rules rewrites for
// BTLconst (32-bit bit test with constant index) values; it reports whether
// v was rewritten. The rules fold shifts into the tested bit index.
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (ADDQ x x))
	// cond: c>1
	// result: (BTLconst [c-1] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] || !(c > 1) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - 1)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (ADDL x x))
	// cond: c>1
	// result: (BTLconst [c-1] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] || !(c > 1) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - 1)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [0] s:(SHRXL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRXL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTQconst applies the AMD64.rules rewrites for
// BTQconst (64-bit bit test with constant index) values; it reports whether
// v was rewritten. The rules fold shifts into the tested bit index.
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (ADDQ x x))
	// cond: c>1
	// result: (BTQconst [c-1] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] || !(c > 1) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - 1)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRQconst applies the AMD64.rules rewrites for
// BTRQconst (bit-test-and-reset with constant index) values; it reports
// whether v was rewritten.
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTSQconst applies the AMD64.rules rewrites for
// BTSQconst (bit-test-and-set with constant index) values; it reports
// whether v was rewritten.
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLCC applies the AMD64.rules rewrites for
// CMOVLCC (32-bit conditional move on carry clear / unsigned >=) values;
// it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLCS applies the AMD64.rules rewrites for
// CMOVLCS (32-bit conditional move on carry set / unsigned <) values;
// it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVLEQ applies the AMD64.rules rewrites for
// CMOVLEQ (32-bit conditional move on equal) values; it reports whether v
// was rewritten. EQ is symmetric, so InvertFlags keeps the same op.
func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE x y c:(CMPQconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVLGT x y (CMPQconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CMOVLGE x y c:(CMPLconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVLGT x y (CMPLconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT x y c:(CMPQconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVLLE x y (CMPQconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CMOVLLT x y c:(CMPLconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVLLE x y (CMPLconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSRQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGE x y c:(CMPQconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVQGT x y (CMPQconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CMOVQGE x y c:(CMPLconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVQGT x y (CMPLconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT x y c:(CMPQconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVQLE x y (CMPQconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CMOVQLT x y c:(CMPLconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVQLE x y (CMPLconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQNE x y (InvertFlags cond))
	// result: (CMOVQNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWHI x y (InvertFlags cond))
	// result: (CMOVWCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWLE simplifies CMOVWLE when its flags
// argument is an InvertFlags or a statically known flag constant.
// It reports whether a rewrite was applied; rules are tried in order.
func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLE x y (InvertFlags cond))
	// result: (CMOVWGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWLS simplifies CMOVWLS (unsigned <=) when
// its flags argument is an InvertFlags or a known flag constant.
// It reports whether a rewrite was applied; rules are tried in order.
func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLS x y (InvertFlags cond))
	// result: (CMOVWCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWLT simplifies CMOVWLT when its flags
// argument is an InvertFlags or a statically known flag constant.
// It reports whether a rewrite was applied; rules are tried in order.
func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLT x y (InvertFlags cond))
	// result: (CMOVWGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWNE simplifies CMOVWNE when its flags
// argument is an InvertFlags (NE is symmetric under inversion) or a
// statically known flag constant. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWNE x y (InvertFlags cond))
	// result: (CMOVWNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPB canonicalizes byte compares: folds
// constants into CMPBconst, canonicalizes operand order via InvertFlags,
// and merges a MOVBload operand into CMPBload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int8(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int8(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPB y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBconst evaluates byte compare-with-constant
// at compile time where possible (producing Flag* constants), converts
// compares against 0 into TESTB forms, and folds a one-use MOVBload into
// CMPBconstload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) == y
	// result: (FlagEQ)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) < y && uint8(x) < uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) < y && uint8(x) > uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) > y && uint8(x) < uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) > y && uint8(x) > uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int8(m) && int8(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTB x y)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPBconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTBconst [int8(c)] x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt8(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBconstload folds address arithmetic
// (ADDQconst offsets and LEAQ symbol+offset) into the instruction's
// val+off aux. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBload folds address arithmetic into the
// aux offset/symbol and converts a constant compare operand into
// CMPBconstload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1) + int64(off2))
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (CMPBconstload [makeValAndOff(int32(int8(c)), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPL canonicalizes 32-bit compares: folds
// constants into CMPLconst, canonicalizes operand order via InvertFlags,
// and merges a MOVLload operand into CMPLload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPL y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconst evaluates 32-bit compare-with-constant
// at compile time where possible (producing Flag* constants), derives
// FlagLT_ULT from known value-range facts (SHRLconst/ANDLconst), converts
// compares against 0 into TESTL forms, and folds a one-use MOVLload into
// CMPLconstload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x == y
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x < y && uint32(x) < uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x < y && uint32(x) > uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x > y && uint32(x) < uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x > y && uint32(x) > uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTL x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPLconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTLconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconstload folds address arithmetic
// (ADDQconst offsets and LEAQ symbol+offset) into the instruction's
// val+off aux. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLload folds address arithmetic into the
// aux offset/symbol and converts a constant compare operand into
// CMPLconstload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1) + int64(off2))
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (CMPLconstload [makeValAndOff(c, off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQ canonicalizes 64-bit compares: folds
// 32-bit-representable constants into CMPQconst, canonicalizes operand
// order via InvertFlags, evaluates constant-vs-constant compares to Flag*
// values, and merges a MOVQload operand into CMPQload. Rules are tried
// in order.
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPQ y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x == y
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x < y && uint64(x) < uint64(y)
	// result: (FlagLT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x < y && uint64(x) > uint64(y)
	// result: (FlagLT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x > y && uint64(x) < uint64(y)
	// result: (FlagGT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x > y && uint64(x) > uint64(y)
	// result: (FlagGT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconst evaluates 64-bit compare-with-constant
// at compile time where possible (producing Flag* constants), derives
// FlagLT_ULT from known value-range facts (zero-extensions, SHRQconst,
// ANDQconst/ANDLconst), converts compares against 0 into TESTQ forms, and
// folds a one-use MOVQload into CMPQconstload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x == int64(y)
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x == int64(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x < int64(y) && uint64(x) < uint64(int64(y))
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x < int64(y) && uint64(x) > uint64(int64(y))
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x > int64(y) && uint64(x) < uint64(int64(y))
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x > int64(y) && uint64(x) > uint64(int64(y))
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst a:(ANDQ x y) [0])
	// cond: a.Uses == 1
	// result: (TESTQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQ {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPQconst a:(ANDQconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTQconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconstload folds address arithmetic
// (ADDQconst offsets and LEAQ symbol+offset) into the instruction's
// val+off aux. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQload folds address arithmetic into the
// aux offset/symbol and, when the compared constant fits the aux encoding
// (validVal), converts it into CMPQconstload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1) + int64(off2))
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (CMPQconstload [makeValAndOff(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPW canonicalizes 16-bit compares: folds
// constants into CMPWconst, canonicalizes operand order via InvertFlags,
// and merges a MOVWload operand into CMPWload. Rules are tried in order.
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int16(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int16(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int16ToAuxInt(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPW y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWconst rewrites CMPWconst values: it constant-folds
// compares of MOVLconst into flag constants, strength-reduces compares with zero
// into TESTW forms, and folds a compared load into CMPWconstload.
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x) == y
	// result: (FlagEQ)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x) < y && uint16(x) < uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x) < y && uint16(x) > uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x) > y && uint16(x) < uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x) > y && uint16(x) > uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < n
	// result: (FlagLT_ULT)
	// A value masked to [0, m] is always below n when m < n.
	for {
		n := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int16(m) && int16(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTW x y)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPWconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTWconst [int16(c)] x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	// The replacement is built in the load's block (note b = l.Block) so it
	// stays next to the memory state it consumes.
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWconstload folds address arithmetic (ADDQconst,
// LEAQ) on the pointer operand into the CMPWconstload's offset/symbol.
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWload folds pointer arithmetic into the load's
// offset/symbol, and turns a compare against a constant into CMPWconstload.
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	// The constant is truncated to 16 bits before being packed with the offset.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPXCHGLlock folds an ADDQconst on the pointer
// operand into the instruction's offset.
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPXCHGQlock folds an ADDQconst on the pointer
// operand into the instruction's offset (64-bit variant of CMPXCHGLlock).
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSD merges a divisor that is a one-use load into
// a memory-operand divide.
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSDload folds address arithmetic (ADDQconst,
// LEAQ) on the base pointer into the load's offset/symbol.
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSS merges a divisor that is a one-use load into
// a memory-operand divide (single-precision variant of DIVSD).
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSSload folds address arithmetic (ADDQconst,
// LEAQ) on the base pointer into the load's offset/symbol.
func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULL canonicalizes operand order for the
// commutative high-multiply.
func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	// Put the rematerializeable operand first to help register allocation.
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULLU canonicalizes operand order for the
// commutative unsigned high-multiply.
func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULQ canonicalizes operand order for the
// commutative 64-bit high-multiply.
func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64HMULQU canonicalizes operand order for the
// commutative unsigned 64-bit high-multiply.
func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL folds constant adds into the LEAL offset and
// turns an ADDL operand into the two-register LEAL1 form.
func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	// ADDL is commutative; the inner loop tries both operand orders.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL1 rewrites LEAL1 (base + index): folds constant
// adds into the offset and upgrades a scaled index into LEAL2/LEAL4/LEAL8.
func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	// LEAL1 is commutative; the inner loop tries both operand orders.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (ADDL y y))
	// result: (LEAL2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL2 rewrites LEAL2 (base + 2*index): folds
// constant adds into the offset (scaled by 2 on the index side), upgrades a
// doubled index to LEAL4/LEAL8, and reduces the pure-shift shape to SHLLconst.
func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	// The index is scaled by 2, so its constant contributes 2*d to the offset.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDL y y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDL {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [0] {s} (ADDL x x) x)
	// cond: s == nil
	// result: (SHLLconst [2] x)
	// (x+x) + 2*x == 4*x, i.e. x << 2.
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] || x != v_1 || !(s == nil) {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(2)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL4 rewrites LEAL4 (base + 4*index): folds
// constant adds into the offset and upgrades a doubled index to LEAL8.
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	// The index is scaled by 4, so its constant contributes 4*d to the offset.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDL y y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDL {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL8 rewrites LEAL8 (base + 8*index): folds
// constant adds on either operand into the offset.
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	// The index is scaled by 8, so its constant contributes 8*d to the offset.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ rewrites LEAQ: folds constant adds and nested
// LEAQ* forms into the offset/symbol, and turns an ADDQ operand into LEAQ1.
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	// ADDQ is commutative; the inner loop tries both operand orders.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ1 rewrites LEAQ1 (base + index): folds constant
// adds and nested LEAQ/LEAQ1 into the offset/symbol, upgrades a scaled index to
// LEAQ2/LEAQ4/LEAQ8, and degenerates the zero-offset form to a plain ADDQ.
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	// LEAQ1 is commutative; the inner loop tries both operand orders.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (ADDQ y y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	// Inner LEAQ1 with equal operands is y+y, i.e. 2*y.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} y x)
	// x + (x + y) == y + 2*x; the inner LEAQ1 is commutative too (_i1 loop).
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ2 rewrites LEAQ2 (base + 2*index): folds
// constant adds (scaled by 2 on the index side) and nested LEAQ/LEAQ1 forms,
// upgrades a doubled/shifted index to LEAQ4/LEAQ8, reduces the pure-shift
// shape to SHLQconst, and folds a constant index away entirely.
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	// The index is scaled by 2, so its constant contributes 2*d to the offset.
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQ y y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [0] {s} (ADDQ x x) x)
	// cond: s == nil
	// result: (SHLQconst [2] x)
	// (x+x) + 2*x == 4*x, i.e. x << 2.
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] || x != v_1 || !(s == nil) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	// Inner LEAQ1 with equal operands is 2*y+off2; doubling again gives 4*y.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	// A constant index folds entirely into the displacement.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ4 applies the generated rewrite rules for
// LEAQ4 (address x + 4*y + off{sym}); it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQ y y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ8 applies the generated rewrite rules for
// LEAQ8 (address x + 8*y + off{sym}); it reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBELstore applies the generated rewrite rules
// for MOVBELstore (byte-swapped 32-bit store); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1
	// result: (MOVLstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBEQstore applies the generated rewrite rules
// for MOVBEQstore (byte-swapped 64-bit store); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1
	// result: (MOVQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBEWstore applies the generated rewrite rules
// for MOVBEWstore (byte-swapped 16-bit store); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQSX applies the generated rewrite rules for
// MOVBQSX (sign-extend byte to quadword); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQSXload applies the generated rewrite rules
// for MOVBQSXload (sign-extending byte load); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBQSXload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(int8(read8(sym, int64(off))))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBQZX applies the generated rewrite rules for
// MOVBQZX (zero-extend byte to quadword); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBatomicload applies the generated rewrite rules
// for MOVBatomicload (atomic byte load); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBload applies the generated rewrite rules for
// MOVBload (zero-extending byte load); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstore applies the generated rewrite rules for
// MOVBstore (byte store); reports whether v was rewritten. The first ten
// rules fuse a single-use SETcc flag materialization directly into the store.
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVBstoreconst applies the generated rewrite rules
// for MOVBstoreconst (store of a constant byte); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQSX applies the generated rewrite rules for
// MOVLQSX (sign-extend long to quadword); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQSXload applies the generated rewrite rules
// for MOVLQSXload (sign-extending long load); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLQSXload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQZX applies the generated rewrite rules for
// MOVLQZX (zero-extend long to quadword); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLatomicload applies the generated rewrite rules
// for MOVLatomicload (atomic 32-bit load); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLf2i applies the generated rewrite rules for
// MOVLf2i (reinterpret 32-bit float bits as int); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLi2f applies the generated rewrite rules for
// MOVLi2f (reinterpret 32-bit int bits as float); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLload applies the generated rewrite rules for
// MOVLload (zero-extending 32-bit load); reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLstore matches a MOVLstore value against the
// rewrite rules annotated below and reports whether v was rewritten in place.
// The rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	// A 32-bit store only writes the low 32 bits, so the sign extension
	// of the stored value is irrelevant.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	// Likewise, zero extension of the stored value is irrelevant.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	// Fold a constant pointer addition into the store's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	// Storing a constant becomes a store-immediate.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	// Only the low 32 bits of c are stored, so truncation is safe here.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold an address computation (LEAQ) into the store.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	// Load-op-store on the same address collapses to a read-modify-write.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	// ADDL is commutative: the inner loop tries both operand orders.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	// SUBL is not commutative, so only the (load - x) order is matched.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	// Load-op-immediate-store collapses to a read-modify-write with immediate.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// result: (MOVSSstore [off] {sym} ptr val mem)
	// Store of a float's bits: store straight from the float register.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBELstore [i] {s} p w mem)
	// With GOAMD64>=3 (MOVBE available), a byte-swapped store is a single
	// MOVBE instruction.
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBELstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLstoreconst matches a MOVLstoreconst value
// against the rewrite rules annotated below and reports whether v was
// rewritten in place.
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	// Fold a constant pointer addition into the store's offset.
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	// Fold an address computation (LEAQ) into the store.
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOload matches a MOVOload (128-bit load) value
// against the rewrite rules annotated below and reports whether v was
// rewritten in place.
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	// Fold a constant pointer addition into the load's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	// Fold an address computation (LEAQ) into the load.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOstore matches a MOVOstore (128-bit store)
// value against the rewrite rules annotated below and reports whether v was
// rewritten in place.
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	// Fold a constant pointer addition into the store's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold an address computation (LEAQ) into the store.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
	// cond: symIsRO(srcSym)
	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]
	//          (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
	// A 16-byte copy from a read-only global: read both 8-byte halves at
	// compile time and emit two constant MOVQstores (low half first,
	// threaded through memory into the high-half store).
	for {
		dstOff := auxIntToInt32(v.AuxInt)
		dstSym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVOload {
			break
		}
		srcOff := auxIntToInt32(v_1.AuxInt)
		srcSym := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		mem := v_2
		if !(symIsRO(srcSym)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(dstOff + 8)
		v.Aux = symToAux(dstSym)
		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(dstOff)
		v1.Aux = symToAux(dstSym)
		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
		v1.AddArg3(ptr, v2, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOstoreconst matches a MOVOstoreconst value
// against the rewrite rules annotated below and reports whether v was
// rewritten in place.
func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	// Fold a constant pointer addition into the store's offset.
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	// Fold an address computation (LEAQ) into the store.
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQatomicload matches a MOVQatomicload value
// against the rewrite rules annotated below and reports whether v was
// rewritten in place. Only addressing is folded; atomicity is unaffected.
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	// Fold a constant pointer addition into the load's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	// Fold an address computation (LEAQ) into the load.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
11798 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
11799 v_0 := v.Args[0]
11800 b := v.Block
11801
11802
11803
11804 for {
11805 t := v.Type
11806 if v_0.Op != OpArg {
11807 break
11808 }
11809 u := v_0.Type
11810 off := auxIntToInt32(v_0.AuxInt)
11811 sym := auxToSym(v_0.Aux)
11812 if !(t.Size() == u.Size()) {
11813 break
11814 }
11815 b = b.Func.Entry
11816 v0 := b.NewValue0(v.Pos, OpArg, t)
11817 v.copyOf(v0)
11818 v0.AuxInt = int32ToAuxInt(off)
11819 v0.Aux = symToAux(sym)
11820 return true
11821 }
11822 return false
11823 }
11824 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
11825 v_0 := v.Args[0]
11826 b := v.Block
11827
11828
11829
11830 for {
11831 t := v.Type
11832 if v_0.Op != OpArg {
11833 break
11834 }
11835 u := v_0.Type
11836 off := auxIntToInt32(v_0.AuxInt)
11837 sym := auxToSym(v_0.Aux)
11838 if !(t.Size() == u.Size()) {
11839 break
11840 }
11841 b = b.Func.Entry
11842 v0 := b.NewValue0(v.Pos, OpArg, t)
11843 v.copyOf(v0)
11844 v0.AuxInt = int32ToAuxInt(off)
11845 v0.Aux = symToAux(sym)
11846 return true
11847 }
11848 return false
11849 }
// rewriteValueAMD64_OpAMD64MOVQload matches a MOVQload value against the
// rewrite rules annotated below and reports whether v was rewritten in place.
// The rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	// Load after store to the same location: forward the stored value.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	// Fold a constant pointer addition into the load's displacement.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	// Fold an address computation (LEAQ) into the load.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// result: (MOVQf2i val)
	// Integer load of a just-stored double: reuse the float register
	// contents via a bit-for-bit move instead of going through memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	// A load from a read-only global can be resolved at compile time.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
11950 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
11951 v_2 := v.Args[2]
11952 v_1 := v.Args[1]
11953 v_0 := v.Args[0]
11954
11955
11956
11957 for {
11958 off1 := auxIntToInt32(v.AuxInt)
11959 sym := auxToSym(v.Aux)
11960 if v_0.Op != OpAMD64ADDQconst {
11961 break
11962 }
11963 off2 := auxIntToInt32(v_0.AuxInt)
11964 ptr := v_0.Args[0]
11965 val := v_1
11966 mem := v_2
11967 if !(is32Bit(int64(off1) + int64(off2))) {
11968 break
11969 }
11970 v.reset(OpAMD64MOVQstore)
11971 v.AuxInt = int32ToAuxInt(off1 + off2)
11972 v.Aux = symToAux(sym)
11973 v.AddArg3(ptr, val, mem)
11974 return true
11975 }
11976
11977
11978
11979 for {
11980 off := auxIntToInt32(v.AuxInt)
11981 sym := auxToSym(v.Aux)
11982 ptr := v_0
11983 if v_1.Op != OpAMD64MOVQconst {
11984 break
11985 }
11986 c := auxIntToInt64(v_1.AuxInt)
11987 mem := v_2
11988 if !(validVal(c)) {
11989 break
11990 }
11991 v.reset(OpAMD64MOVQstoreconst)
11992 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11993 v.Aux = symToAux(sym)
11994 v.AddArg2(ptr, mem)
11995 return true
11996 }
11997
11998
11999
12000 for {
12001 off1 := auxIntToInt32(v.AuxInt)
12002 sym1 := auxToSym(v.Aux)
12003 if v_0.Op != OpAMD64LEAQ {
12004 break
12005 }
12006 off2 := auxIntToInt32(v_0.AuxInt)
12007 sym2 := auxToSym(v_0.Aux)
12008 base := v_0.Args[0]
12009 val := v_1
12010 mem := v_2
12011 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12012 break
12013 }
12014 v.reset(OpAMD64MOVQstore)
12015 v.AuxInt = int32ToAuxInt(off1 + off2)
12016 v.Aux = symToAux(mergeSym(sym1, sym2))
12017 v.AddArg3(base, val, mem)
12018 return true
12019 }
12020
12021
12022
12023 for {
12024 off := auxIntToInt32(v.AuxInt)
12025 sym := auxToSym(v.Aux)
12026 ptr := v_0
12027 y := v_1
12028 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12029 break
12030 }
12031 mem := y.Args[2]
12032 x := y.Args[0]
12033 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12034 break
12035 }
12036 v.reset(OpAMD64ADDQmodify)
12037 v.AuxInt = int32ToAuxInt(off)
12038 v.Aux = symToAux(sym)
12039 v.AddArg3(ptr, x, mem)
12040 return true
12041 }
12042
12043
12044
12045 for {
12046 off := auxIntToInt32(v.AuxInt)
12047 sym := auxToSym(v.Aux)
12048 ptr := v_0
12049 y := v_1
12050 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12051 break
12052 }
12053 mem := y.Args[2]
12054 x := y.Args[0]
12055 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12056 break
12057 }
12058 v.reset(OpAMD64ANDQmodify)
12059 v.AuxInt = int32ToAuxInt(off)
12060 v.Aux = symToAux(sym)
12061 v.AddArg3(ptr, x, mem)
12062 return true
12063 }
12064
12065
12066
12067 for {
12068 off := auxIntToInt32(v.AuxInt)
12069 sym := auxToSym(v.Aux)
12070 ptr := v_0
12071 y := v_1
12072 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12073 break
12074 }
12075 mem := y.Args[2]
12076 x := y.Args[0]
12077 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12078 break
12079 }
12080 v.reset(OpAMD64ORQmodify)
12081 v.AuxInt = int32ToAuxInt(off)
12082 v.Aux = symToAux(sym)
12083 v.AddArg3(ptr, x, mem)
12084 return true
12085 }
12086
12087
12088
12089 for {
12090 off := auxIntToInt32(v.AuxInt)
12091 sym := auxToSym(v.Aux)
12092 ptr := v_0
12093 y := v_1
12094 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12095 break
12096 }
12097 mem := y.Args[2]
12098 x := y.Args[0]
12099 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12100 break
12101 }
12102 v.reset(OpAMD64XORQmodify)
12103 v.AuxInt = int32ToAuxInt(off)
12104 v.Aux = symToAux(sym)
12105 v.AddArg3(ptr, x, mem)
12106 return true
12107 }
12108
12109
12110
12111 for {
12112 off := auxIntToInt32(v.AuxInt)
12113 sym := auxToSym(v.Aux)
12114 ptr := v_0
12115 y := v_1
12116 if y.Op != OpAMD64ADDQ {
12117 break
12118 }
12119 _ = y.Args[1]
12120 y_0 := y.Args[0]
12121 y_1 := y.Args[1]
12122 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12123 l := y_0
12124 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12125 continue
12126 }
12127 mem := l.Args[1]
12128 if ptr != l.Args[0] {
12129 continue
12130 }
12131 x := y_1
12132 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12133 continue
12134 }
12135 v.reset(OpAMD64ADDQmodify)
12136 v.AuxInt = int32ToAuxInt(off)
12137 v.Aux = symToAux(sym)
12138 v.AddArg3(ptr, x, mem)
12139 return true
12140 }
12141 break
12142 }
12143
12144
12145
12146 for {
12147 off := auxIntToInt32(v.AuxInt)
12148 sym := auxToSym(v.Aux)
12149 ptr := v_0
12150 y := v_1
12151 if y.Op != OpAMD64SUBQ {
12152 break
12153 }
12154 x := y.Args[1]
12155 l := y.Args[0]
12156 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12157 break
12158 }
12159 mem := l.Args[1]
12160 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12161 break
12162 }
12163 v.reset(OpAMD64SUBQmodify)
12164 v.AuxInt = int32ToAuxInt(off)
12165 v.Aux = symToAux(sym)
12166 v.AddArg3(ptr, x, mem)
12167 return true
12168 }
12169
12170
12171
12172 for {
12173 off := auxIntToInt32(v.AuxInt)
12174 sym := auxToSym(v.Aux)
12175 ptr := v_0
12176 y := v_1
12177 if y.Op != OpAMD64ANDQ {
12178 break
12179 }
12180 _ = y.Args[1]
12181 y_0 := y.Args[0]
12182 y_1 := y.Args[1]
12183 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12184 l := y_0
12185 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12186 continue
12187 }
12188 mem := l.Args[1]
12189 if ptr != l.Args[0] {
12190 continue
12191 }
12192 x := y_1
12193 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12194 continue
12195 }
12196 v.reset(OpAMD64ANDQmodify)
12197 v.AuxInt = int32ToAuxInt(off)
12198 v.Aux = symToAux(sym)
12199 v.AddArg3(ptr, x, mem)
12200 return true
12201 }
12202 break
12203 }
12204
12205
12206
12207 for {
12208 off := auxIntToInt32(v.AuxInt)
12209 sym := auxToSym(v.Aux)
12210 ptr := v_0
12211 y := v_1
12212 if y.Op != OpAMD64ORQ {
12213 break
12214 }
12215 _ = y.Args[1]
12216 y_0 := y.Args[0]
12217 y_1 := y.Args[1]
12218 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12219 l := y_0
12220 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12221 continue
12222 }
12223 mem := l.Args[1]
12224 if ptr != l.Args[0] {
12225 continue
12226 }
12227 x := y_1
12228 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12229 continue
12230 }
12231 v.reset(OpAMD64ORQmodify)
12232 v.AuxInt = int32ToAuxInt(off)
12233 v.Aux = symToAux(sym)
12234 v.AddArg3(ptr, x, mem)
12235 return true
12236 }
12237 break
12238 }
12239
12240
12241
12242 for {
12243 off := auxIntToInt32(v.AuxInt)
12244 sym := auxToSym(v.Aux)
12245 ptr := v_0
12246 y := v_1
12247 if y.Op != OpAMD64XORQ {
12248 break
12249 }
12250 _ = y.Args[1]
12251 y_0 := y.Args[0]
12252 y_1 := y.Args[1]
12253 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12254 l := y_0
12255 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12256 continue
12257 }
12258 mem := l.Args[1]
12259 if ptr != l.Args[0] {
12260 continue
12261 }
12262 x := y_1
12263 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12264 continue
12265 }
12266 v.reset(OpAMD64XORQmodify)
12267 v.AuxInt = int32ToAuxInt(off)
12268 v.Aux = symToAux(sym)
12269 v.AddArg3(ptr, x, mem)
12270 return true
12271 }
12272 break
12273 }
12274
12275
12276
12277 for {
12278 off := auxIntToInt32(v.AuxInt)
12279 sym := auxToSym(v.Aux)
12280 ptr := v_0
12281 x := v_1
12282 if x.Op != OpAMD64BTSQconst {
12283 break
12284 }
12285 c := auxIntToInt8(x.AuxInt)
12286 l := x.Args[0]
12287 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12288 break
12289 }
12290 mem := l.Args[1]
12291 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12292 break
12293 }
12294 v.reset(OpAMD64BTSQconstmodify)
12295 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12296 v.Aux = symToAux(sym)
12297 v.AddArg2(ptr, mem)
12298 return true
12299 }
12300
12301
12302
12303 for {
12304 off := auxIntToInt32(v.AuxInt)
12305 sym := auxToSym(v.Aux)
12306 ptr := v_0
12307 x := v_1
12308 if x.Op != OpAMD64BTRQconst {
12309 break
12310 }
12311 c := auxIntToInt8(x.AuxInt)
12312 l := x.Args[0]
12313 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12314 break
12315 }
12316 mem := l.Args[1]
12317 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12318 break
12319 }
12320 v.reset(OpAMD64BTRQconstmodify)
12321 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12322 v.Aux = symToAux(sym)
12323 v.AddArg2(ptr, mem)
12324 return true
12325 }
12326
12327
12328
12329 for {
12330 off := auxIntToInt32(v.AuxInt)
12331 sym := auxToSym(v.Aux)
12332 ptr := v_0
12333 x := v_1
12334 if x.Op != OpAMD64BTCQconst {
12335 break
12336 }
12337 c := auxIntToInt8(x.AuxInt)
12338 l := x.Args[0]
12339 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12340 break
12341 }
12342 mem := l.Args[1]
12343 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12344 break
12345 }
12346 v.reset(OpAMD64BTCQconstmodify)
12347 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12348 v.Aux = symToAux(sym)
12349 v.AddArg2(ptr, mem)
12350 return true
12351 }
12352
12353
12354
12355 for {
12356 off := auxIntToInt32(v.AuxInt)
12357 sym := auxToSym(v.Aux)
12358 ptr := v_0
12359 a := v_1
12360 if a.Op != OpAMD64ADDQconst {
12361 break
12362 }
12363 c := auxIntToInt32(a.AuxInt)
12364 l := a.Args[0]
12365 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12366 break
12367 }
12368 mem := l.Args[1]
12369 ptr2 := l.Args[0]
12370 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12371 break
12372 }
12373 v.reset(OpAMD64ADDQconstmodify)
12374 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12375 v.Aux = symToAux(sym)
12376 v.AddArg2(ptr, mem)
12377 return true
12378 }
12379
12380
12381
12382 for {
12383 off := auxIntToInt32(v.AuxInt)
12384 sym := auxToSym(v.Aux)
12385 ptr := v_0
12386 a := v_1
12387 if a.Op != OpAMD64ANDQconst {
12388 break
12389 }
12390 c := auxIntToInt32(a.AuxInt)
12391 l := a.Args[0]
12392 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12393 break
12394 }
12395 mem := l.Args[1]
12396 ptr2 := l.Args[0]
12397 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12398 break
12399 }
12400 v.reset(OpAMD64ANDQconstmodify)
12401 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12402 v.Aux = symToAux(sym)
12403 v.AddArg2(ptr, mem)
12404 return true
12405 }
12406
12407
12408
12409 for {
12410 off := auxIntToInt32(v.AuxInt)
12411 sym := auxToSym(v.Aux)
12412 ptr := v_0
12413 a := v_1
12414 if a.Op != OpAMD64ORQconst {
12415 break
12416 }
12417 c := auxIntToInt32(a.AuxInt)
12418 l := a.Args[0]
12419 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12420 break
12421 }
12422 mem := l.Args[1]
12423 ptr2 := l.Args[0]
12424 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12425 break
12426 }
12427 v.reset(OpAMD64ORQconstmodify)
12428 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12429 v.Aux = symToAux(sym)
12430 v.AddArg2(ptr, mem)
12431 return true
12432 }
12433
12434
12435
12436 for {
12437 off := auxIntToInt32(v.AuxInt)
12438 sym := auxToSym(v.Aux)
12439 ptr := v_0
12440 a := v_1
12441 if a.Op != OpAMD64XORQconst {
12442 break
12443 }
12444 c := auxIntToInt32(a.AuxInt)
12445 l := a.Args[0]
12446 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12447 break
12448 }
12449 mem := l.Args[1]
12450 ptr2 := l.Args[0]
12451 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12452 break
12453 }
12454 v.reset(OpAMD64XORQconstmodify)
12455 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12456 v.Aux = symToAux(sym)
12457 v.AddArg2(ptr, mem)
12458 return true
12459 }
12460
12461
12462 for {
12463 off := auxIntToInt32(v.AuxInt)
12464 sym := auxToSym(v.Aux)
12465 ptr := v_0
12466 if v_1.Op != OpAMD64MOVQf2i {
12467 break
12468 }
12469 val := v_1.Args[0]
12470 mem := v_2
12471 v.reset(OpAMD64MOVSDstore)
12472 v.AuxInt = int32ToAuxInt(off)
12473 v.Aux = symToAux(sym)
12474 v.AddArg3(ptr, val, mem)
12475 return true
12476 }
12477
12478
12479
12480 for {
12481 i := auxIntToInt32(v.AuxInt)
12482 s := auxToSym(v.Aux)
12483 p := v_0
12484 x := v_1
12485 if x.Op != OpAMD64BSWAPQ {
12486 break
12487 }
12488 w := x.Args[0]
12489 mem := v_2
12490 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12491 break
12492 }
12493 v.reset(OpAMD64MOVBEQstore)
12494 v.AuxInt = int32ToAuxInt(i)
12495 v.Aux = symToAux(s)
12496 v.AddArg3(p, w, mem)
12497 return true
12498 }
12499 return false
12500 }
// rewriteValueAMD64_OpAMD64MOVQstoreconst applies rewrite rules to a
// MOVQstoreconst value (store of a constant 64-bit value). It folds
// address arithmetic into the store's offset/symbol and pairs adjacent
// zero stores into a single 16-byte MOVOstoreconst. Returns true if a
// rule fired (v was rewritten in place).
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(sym1.mergeSymTypedWorkaround())
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
	// result: (MOVOstoreconst [makeValAndOff(0, a.Off())] {s} p0 mem)
	// (two adjacent 8-byte zero stores combined into one 16-byte zero store;
	// this variant matches when v stores the higher address)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p0 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	// match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
	// result: (MOVOstoreconst [makeValAndOff(0, a.Off())] {s} p0 mem)
	// (mirror of the previous rule: v stores the lower address)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDload applies rewrite rules to a MOVSDload
// (64-bit float load): fold constant offsets and LEAQ address computations
// into the load, and forward a just-stored integer value directly through
// a register move instead of going via memory. Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// result: (MOVQi2f val)
	// (store-to-load forwarding: reading back a just-stored 64-bit integer
	// as a float becomes a direct GP->XMM register move)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDstore applies rewrite rules to a MOVSDstore
// (64-bit float store): fold constant offsets and LEAQ addresses into the
// store, and turn stores of integer-sourced or constant float values into
// plain integer stores. Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// result: (MOVQstore [off] {sym} ptr val mem)
	// (value came from a GP register; store it directly, skipping the XMM move)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVSDconst [f]) mem)
	// cond: f == f
	// result: (MOVQstore [off] {sym} ptr (MOVQconst [int64(math.Float64bits(f))]) mem)
	// (f == f excludes NaN: NaN constants may have multiple bit patterns,
	// so only non-NaN constants are rewritten to an integer bit-pattern store)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDconst {
			break
		}
		f := auxIntToFloat64(v_1.AuxInt)
		mem := v_2
		if !(f == f) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(f)))
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSload applies rewrite rules to a MOVSSload
// (32-bit float load): fold constant offsets and LEAQ addresses into the
// load, and forward a just-stored 32-bit integer through a register move.
// Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// result: (MOVLi2f val)
	// (store-to-load forwarding via a direct GP->XMM register move)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSstore applies rewrite rules to a MOVSSstore
// (32-bit float store): fold constant offsets and LEAQ addresses into the
// store, and turn stores of integer-sourced or constant float values into
// plain 32-bit integer stores. Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// result: (MOVLstore [off] {sym} ptr val mem)
	// (value came from a GP register; store it directly, skipping the XMM move)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVSSconst [f]) mem)
	// cond: f == f
	// result: (MOVLstore [off] {sym} ptr (MOVLconst [int32(math.Float32bits(f))]) mem)
	// (f == f excludes NaN, which may not have a unique bit pattern)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSconst {
			break
		}
		f := auxIntToFloat32(v_1.AuxInt)
		mem := v_2
		if !(f == f) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(f)))
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSX applies rewrite rules to a MOVWQSX
// (sign-extend 16->64 bits): fuse the extension into a preceding load,
// drop it when the sign bit is provably clear, and collapse redundant
// extension chains. Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	// (mask clears bit 15, so the value is non-negative as an int16 and
	// sign extension is a no-op; keep only the low 15 mask bits)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	// (the inner 8-bit sign extension already produces a full-width result)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSXload applies rewrite rules to a
// MOVWQSXload (sign-extending 16-bit load): forward a just-stored value,
// fold LEAQ addresses, and constant-fold loads from read-only data.
// Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	// (store-to-load forwarding: reuse the stored register value)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWQSXload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
	// (constant-fold a load from read-only data, sign-extending the 16-bit value)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQZX applies rewrite rules to a MOVWQZX
// (zero-extend 16->64 bits): fuse the extension into a preceding load
// (MOVWload already zero-extends), fold it into a constant mask, and
// collapse redundant extension chains. Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xffff] x)
	// (masking to 16 bits makes the explicit zero extension redundant)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	// (the inner 8-bit zero extension already produces a full-width result)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWload applies rewrite rules to a MOVWload
// (zero-extending 16-bit load): forward a just-stored value, fold constant
// offsets and LEAQ addresses, and constant-fold loads from read-only data.
// Returns true if a rule fired.
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	// (store-to-load forwarding; MOVWload zero-extends, so the forwarded
	// register value must be zero-extended too)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	// (constant-fold a load from read-only data; zero-extended to 32 bits)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstore applies rewrite rules to a MOVWstore
// (16-bit store): drop redundant extensions of the stored value, fold
// address arithmetic, turn constant stores into MOVWstoreconst, and use
// MOVBE for byte-swapped stores when the target supports it. Returns true
// if a rule fired.
func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	// (only the low 16 bits are stored, so the extension is irrelevant)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)), off)] {sym} ptr mem)
	// (the constant is truncated to its low 16 bits)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBEWstore [i] {s} p w mem)
	// (a 16-bit rotate by 8 is a byte swap; use the MOVBE instruction,
	// available when targeting GOAMD64 level v3 or higher)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBEWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconst applies rewrite rules to a
// MOVWstoreconst (store of a constant 16-bit value): fold ADDQconst and
// LEAQ address computations into the store's offset/symbol. Returns true
// if a rule fired.
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
13475 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
13476 v_1 := v.Args[1]
13477 v_0 := v.Args[0]
13478
13479
13480 for {
13481 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13482 x := v_0
13483 if v_1.Op != OpAMD64MOVLconst {
13484 continue
13485 }
13486 c := auxIntToInt32(v_1.AuxInt)
13487 v.reset(OpAMD64MULLconst)
13488 v.AuxInt = int32ToAuxInt(c)
13489 v.AddArg(x)
13490 return true
13491 }
13492 break
13493 }
13494 return false
13495 }
13496 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
13497 v_0 := v.Args[0]
13498 b := v.Block
13499
13500
13501 for {
13502 c := auxIntToInt32(v.AuxInt)
13503 if v_0.Op != OpAMD64MULLconst {
13504 break
13505 }
13506 d := auxIntToInt32(v_0.AuxInt)
13507 x := v_0.Args[0]
13508 v.reset(OpAMD64MULLconst)
13509 v.AuxInt = int32ToAuxInt(c * d)
13510 v.AddArg(x)
13511 return true
13512 }
13513
13514
13515 for {
13516 if auxIntToInt32(v.AuxInt) != -9 {
13517 break
13518 }
13519 x := v_0
13520 v.reset(OpAMD64NEGL)
13521 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13522 v0.AddArg2(x, x)
13523 v.AddArg(v0)
13524 return true
13525 }
13526
13527
13528 for {
13529 if auxIntToInt32(v.AuxInt) != -5 {
13530 break
13531 }
13532 x := v_0
13533 v.reset(OpAMD64NEGL)
13534 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13535 v0.AddArg2(x, x)
13536 v.AddArg(v0)
13537 return true
13538 }
13539
13540
13541 for {
13542 if auxIntToInt32(v.AuxInt) != -3 {
13543 break
13544 }
13545 x := v_0
13546 v.reset(OpAMD64NEGL)
13547 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13548 v0.AddArg2(x, x)
13549 v.AddArg(v0)
13550 return true
13551 }
13552
13553
13554 for {
13555 if auxIntToInt32(v.AuxInt) != -1 {
13556 break
13557 }
13558 x := v_0
13559 v.reset(OpAMD64NEGL)
13560 v.AddArg(x)
13561 return true
13562 }
13563
13564
13565 for {
13566 if auxIntToInt32(v.AuxInt) != 0 {
13567 break
13568 }
13569 v.reset(OpAMD64MOVLconst)
13570 v.AuxInt = int32ToAuxInt(0)
13571 return true
13572 }
13573
13574
13575 for {
13576 if auxIntToInt32(v.AuxInt) != 1 {
13577 break
13578 }
13579 x := v_0
13580 v.copyOf(x)
13581 return true
13582 }
13583
13584
13585 for {
13586 if auxIntToInt32(v.AuxInt) != 3 {
13587 break
13588 }
13589 x := v_0
13590 v.reset(OpAMD64LEAL2)
13591 v.AddArg2(x, x)
13592 return true
13593 }
13594
13595
13596 for {
13597 if auxIntToInt32(v.AuxInt) != 5 {
13598 break
13599 }
13600 x := v_0
13601 v.reset(OpAMD64LEAL4)
13602 v.AddArg2(x, x)
13603 return true
13604 }
13605
13606
13607 for {
13608 if auxIntToInt32(v.AuxInt) != 7 {
13609 break
13610 }
13611 x := v_0
13612 v.reset(OpAMD64LEAL2)
13613 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13614 v0.AddArg2(x, x)
13615 v.AddArg2(x, v0)
13616 return true
13617 }
13618
13619
13620 for {
13621 if auxIntToInt32(v.AuxInt) != 9 {
13622 break
13623 }
13624 x := v_0
13625 v.reset(OpAMD64LEAL8)
13626 v.AddArg2(x, x)
13627 return true
13628 }
13629
13630
13631 for {
13632 if auxIntToInt32(v.AuxInt) != 11 {
13633 break
13634 }
13635 x := v_0
13636 v.reset(OpAMD64LEAL2)
13637 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13638 v0.AddArg2(x, x)
13639 v.AddArg2(x, v0)
13640 return true
13641 }
13642
13643
13644 for {
13645 if auxIntToInt32(v.AuxInt) != 13 {
13646 break
13647 }
13648 x := v_0
13649 v.reset(OpAMD64LEAL4)
13650 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13651 v0.AddArg2(x, x)
13652 v.AddArg2(x, v0)
13653 return true
13654 }
13655
13656
13657 for {
13658 if auxIntToInt32(v.AuxInt) != 19 {
13659 break
13660 }
13661 x := v_0
13662 v.reset(OpAMD64LEAL2)
13663 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13664 v0.AddArg2(x, x)
13665 v.AddArg2(x, v0)
13666 return true
13667 }
13668
13669
13670 for {
13671 if auxIntToInt32(v.AuxInt) != 21 {
13672 break
13673 }
13674 x := v_0
13675 v.reset(OpAMD64LEAL4)
13676 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13677 v0.AddArg2(x, x)
13678 v.AddArg2(x, v0)
13679 return true
13680 }
13681
13682
13683 for {
13684 if auxIntToInt32(v.AuxInt) != 25 {
13685 break
13686 }
13687 x := v_0
13688 v.reset(OpAMD64LEAL8)
13689 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13690 v0.AddArg2(x, x)
13691 v.AddArg2(x, v0)
13692 return true
13693 }
13694
13695
13696 for {
13697 if auxIntToInt32(v.AuxInt) != 27 {
13698 break
13699 }
13700 x := v_0
13701 v.reset(OpAMD64LEAL8)
13702 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13703 v0.AddArg2(x, x)
13704 v.AddArg2(v0, v0)
13705 return true
13706 }
13707
13708
13709 for {
13710 if auxIntToInt32(v.AuxInt) != 37 {
13711 break
13712 }
13713 x := v_0
13714 v.reset(OpAMD64LEAL4)
13715 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13716 v0.AddArg2(x, x)
13717 v.AddArg2(x, v0)
13718 return true
13719 }
13720
13721
13722 for {
13723 if auxIntToInt32(v.AuxInt) != 41 {
13724 break
13725 }
13726 x := v_0
13727 v.reset(OpAMD64LEAL8)
13728 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13729 v0.AddArg2(x, x)
13730 v.AddArg2(x, v0)
13731 return true
13732 }
13733
13734
13735 for {
13736 if auxIntToInt32(v.AuxInt) != 45 {
13737 break
13738 }
13739 x := v_0
13740 v.reset(OpAMD64LEAL8)
13741 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13742 v0.AddArg2(x, x)
13743 v.AddArg2(v0, v0)
13744 return true
13745 }
13746
13747
13748 for {
13749 if auxIntToInt32(v.AuxInt) != 73 {
13750 break
13751 }
13752 x := v_0
13753 v.reset(OpAMD64LEAL8)
13754 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13755 v0.AddArg2(x, x)
13756 v.AddArg2(x, v0)
13757 return true
13758 }
13759
13760
13761 for {
13762 if auxIntToInt32(v.AuxInt) != 81 {
13763 break
13764 }
13765 x := v_0
13766 v.reset(OpAMD64LEAL8)
13767 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13768 v0.AddArg2(x, x)
13769 v.AddArg2(v0, v0)
13770 return true
13771 }
13772
13773
13774
13775 for {
13776 c := auxIntToInt32(v.AuxInt)
13777 x := v_0
13778 if !(isPowerOfTwo(int64(c)+1) && c >= 15) {
13779 break
13780 }
13781 v.reset(OpAMD64SUBL)
13782 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13783 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
13784 v0.AddArg(x)
13785 v.AddArg2(v0, x)
13786 return true
13787 }
13788
13789
13790
13791 for {
13792 c := auxIntToInt32(v.AuxInt)
13793 x := v_0
13794 if !(isPowerOfTwo(c-1) && c >= 17) {
13795 break
13796 }
13797 v.reset(OpAMD64LEAL1)
13798 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13799 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
13800 v0.AddArg(x)
13801 v.AddArg2(v0, x)
13802 return true
13803 }
13804
13805
13806
13807 for {
13808 c := auxIntToInt32(v.AuxInt)
13809 x := v_0
13810 if !(isPowerOfTwo(c-2) && c >= 34) {
13811 break
13812 }
13813 v.reset(OpAMD64LEAL2)
13814 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13815 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
13816 v0.AddArg(x)
13817 v.AddArg2(v0, x)
13818 return true
13819 }
13820
13821
13822
13823 for {
13824 c := auxIntToInt32(v.AuxInt)
13825 x := v_0
13826 if !(isPowerOfTwo(c-4) && c >= 68) {
13827 break
13828 }
13829 v.reset(OpAMD64LEAL4)
13830 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13831 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
13832 v0.AddArg(x)
13833 v.AddArg2(v0, x)
13834 return true
13835 }
13836
13837
13838
13839 for {
13840 c := auxIntToInt32(v.AuxInt)
13841 x := v_0
13842 if !(isPowerOfTwo(c-8) && c >= 136) {
13843 break
13844 }
13845 v.reset(OpAMD64LEAL8)
13846 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
13847 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
13848 v0.AddArg(x)
13849 v.AddArg2(v0, x)
13850 return true
13851 }
13852
13853
13854
13855 for {
13856 c := auxIntToInt32(v.AuxInt)
13857 x := v_0
13858 if !(c%3 == 0 && isPowerOfTwo(c/3)) {
13859 break
13860 }
13861 v.reset(OpAMD64SHLLconst)
13862 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
13863 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
13864 v0.AddArg2(x, x)
13865 v.AddArg(v0)
13866 return true
13867 }
13868
13869
13870
13871 for {
13872 c := auxIntToInt32(v.AuxInt)
13873 x := v_0
13874 if !(c%5 == 0 && isPowerOfTwo(c/5)) {
13875 break
13876 }
13877 v.reset(OpAMD64SHLLconst)
13878 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
13879 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
13880 v0.AddArg2(x, x)
13881 v.AddArg(v0)
13882 return true
13883 }
13884
13885
13886
13887 for {
13888 c := auxIntToInt32(v.AuxInt)
13889 x := v_0
13890 if !(c%9 == 0 && isPowerOfTwo(c/9)) {
13891 break
13892 }
13893 v.reset(OpAMD64SHLLconst)
13894 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
13895 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
13896 v0.AddArg2(x, x)
13897 v.AddArg(v0)
13898 return true
13899 }
13900
13901
13902 for {
13903 c := auxIntToInt32(v.AuxInt)
13904 if v_0.Op != OpAMD64MOVLconst {
13905 break
13906 }
13907 d := auxIntToInt32(v_0.AuxInt)
13908 v.reset(OpAMD64MOVLconst)
13909 v.AuxInt = int32ToAuxInt(c * d)
13910 return true
13911 }
13912 return false
13913 }
// rewriteValueAMD64_OpAMD64MULQ rewrites a 64-bit multiply whose second
// operand is a small constant into the immediate form MULQconst.
func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [int32(c)] x)
	for {
		// MULQ is commutative: the inner loop tries both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			// MULQconst carries a 32-bit immediate; wider constants stay as MULQ.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64MULQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULQconst strength-reduces multiplication by a
// constant into cheaper NEGQ/LEAQ/SHLQ/SUBQ combinations, and constant-folds
// when the operand is itself a constant. Rules are tried in order; each
// `for { ... break }` block is one rule.
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(int64(c)*int64(d))
	// result: (MULQconst [c*d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) * int64(d))) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// (MULQconst [-9] x) => (NEGQ (LEAQ8 x x))
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// (MULQconst [-5] x) => (NEGQ (LEAQ4 x x))
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// (MULQconst [-3] x) => (NEGQ (LEAQ2 x x))
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// (MULQconst [-1] x) => (NEGQ x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// (MULQconst [0] _) => (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// (MULQconst [1] x) => x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// (MULQconst [3] x) => (LEAQ2 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v.AddArg2(x, x)
		return true
	}
	// (MULQconst [5] x) => (LEAQ4 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v.AddArg2(x, x)
		return true
	}
	// (MULQconst [7] x) => (LEAQ2 x (LEAQ2 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [9] x) => (LEAQ8 x x)
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v.AddArg2(x, x)
		return true
	}
	// (MULQconst [11] x) => (LEAQ2 x (LEAQ4 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [13] x) => (LEAQ4 x (LEAQ2 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [19] x) => (LEAQ2 x (LEAQ8 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [21] x) => (LEAQ4 x (LEAQ4 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [25] x) => (LEAQ8 x (LEAQ2 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [27] x) => (LEAQ8 (LEAQ2 x x) (LEAQ2 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// (MULQconst [37] x) => (LEAQ4 x (LEAQ8 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [41] x) => (LEAQ8 x (LEAQ4 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [45] x) => (LEAQ8 (LEAQ4 x x) (LEAQ4 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// (MULQconst [73] x) => (LEAQ8 x (LEAQ8 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// (MULQconst [81] x) => (LEAQ8 (LEAQ8 x x) (LEAQ8 x x))
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(int64(c)+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		// -c would overflow int32 when c is the minimum value, so skip it.
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSD folds a float64 load directly into the
// multiply when the load's only use is this instruction.
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		// MULSD is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSDload folds constant offsets and symbols into
// the load's addressing mode, and forwards a just-stored value to avoid the
// memory round trip.
func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (MULSD x (MOVQi2f y))
	// The load reads exactly what the store just wrote, so reinterpret the
	// stored integer bits as a float instead of going through memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSS folds a float32 load directly into the
// multiply when the load's only use is this instruction.
func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		// MULSS is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64MULSSload folds constant offsets and symbols into
// the load's addressing mode, and forwards a just-stored value to avoid the
// memory round trip.
func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (MULSS x (MOVLi2f y))
	// The load reads exactly what the store just wrote, so reinterpret the
	// stored integer bits as a float instead of going through memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64NEGL simplifies 32-bit negation: double negation,
// negation of a single-use subtraction, and constant folding.
func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGL (NEGL x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGL {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGL s:(SUBL x y))
	// cond: s.Uses == 1
	// result: (SUBL y x)
	// Only when the SUBL has no other users, since its result is replaced.
	for {
		s := v_0
		if s.Op != OpAMD64SUBL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGL (MOVLconst [c]))
	// result: (MOVLconst [-c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-c)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64NEGQ simplifies 64-bit negation: double negation,
// negation of a single-use subtraction, constant folding, and distributing
// the negation over an ADDQconst of an already-negated value.
func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGQ (NEGQ x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGQ s:(SUBQ x y))
	// cond: s.Uses == 1
	// result: (SUBQ y x)
	// Only when the SUBQ has no other users, since its result is replaced.
	for {
		s := v_0
		if s.Op != OpAMD64SUBQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGQ (MOVQconst [c]))
	// result: (MOVQconst [-c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-c)
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		// -c would overflow int32 when c is the minimum value, so skip it.
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
14689 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
14690 v_0 := v.Args[0]
14691
14692
14693 for {
14694 if v_0.Op != OpAMD64MOVLconst {
14695 break
14696 }
14697 c := auxIntToInt32(v_0.AuxInt)
14698 v.reset(OpAMD64MOVLconst)
14699 v.AuxInt = int32ToAuxInt(^c)
14700 return true
14701 }
14702 return false
14703 }
14704 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
14705 v_0 := v.Args[0]
14706
14707
14708 for {
14709 if v_0.Op != OpAMD64MOVQconst {
14710 break
14711 }
14712 c := auxIntToInt64(v_0.AuxInt)
14713 v.reset(OpAMD64MOVQconst)
14714 v.AuxInt = int64ToAuxInt(^c)
14715 return true
14716 }
14717 return false
14718 }
// rewriteValueAMD64_OpAMD64ORL simplifies 32-bit OR: bit-set recognition,
// immediate form, self-OR, and load folding.
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTSL x y)
	// OR-ing in a single shifted-in 1 bit is a bit-test-and-set.
	for {
		// ORL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORL x (MOVLconst [c]))
	// result: (ORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORLconst folds and simplifies OR-with-constant:
// nested constants combine, identity (0) and saturation (-1) are recognized,
// and a fully constant operand folds away.
func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORLconst [c] (ORLconst [d] x))
	// result: (ORLconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] x)
	// cond: c == 0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: c == -1
	// result: (MOVLconst [-1])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c | d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORLconstmodify folds address arithmetic
// (ADDQconst offsets and LEAQ symbol+offset) into the memory operand.
func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORLload folds constant offsets and symbols into the
// load's addressing mode, and forwards a just-stored float value to avoid the
// memory round trip.
func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ORL x (MOVLf2i y))
	// The load reads exactly what the float store just wrote, so reinterpret
	// the stored float bits as an integer instead of going through memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORLmodify folds address arithmetic (ADDQconst
// offsets and LEAQ symbol+offset) into the read-modify-write memory operand.
func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQ simplifies 64-bit OR: bit-set recognition,
// immediate forms, double-shift (funnel shift) recognition, constant folding,
// self-OR, and load folding.
func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTSQ x y)
	// OR-ing in a single shifted-in 1 bit is a bit-test-and-set.
	for {
		// ORQ is commutative: each inner loop tries both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
	// result: (BTSQconst [int8(log64(c))] x)
	// Only for bits >= 31: smaller constants fit an ORQconst immediate.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTSQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ x (MOVLconst [c]))
	// result: (ORQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
	// result: (SHRDQ lo hi bits)
	// Combined right shift across a register pair (double-shift).
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHLQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHRDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
	// result: (SHLDQ lo hi bits)
	// Combined left shift across a register pair (double-shift).
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHLDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
	// result: (SHRDQ lo hi bits)
	// Same double-shift pattern with the BMI2 shift variants.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRXQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHLXQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHRDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
	// result: (SHLDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLXQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHRXQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHLDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
	// result: (MOVQconst [c | d])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			d := auxIntToInt64(v_1.AuxInt)
			v.reset(OpAMD64MOVQconst)
			v.AuxInt = int64ToAuxInt(c | d)
			return true
		}
		break
	}
	// match: (ORQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQconst applies the generated rewrite rules for
// ORQconst; rules are tried in order and the first match wins.
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORQconst [c] (ORQconst [d] x))
	// result: (ORQconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// result: (MOVQconst [-1])
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) | d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQconstmodify folds address arithmetic
// (ADDQconst/LEAQ) into the ORQconstmodify addressing mode.
func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQload folds address arithmetic into ORQload and
// rewrites an OR with a just-stored FP value into a register-to-register form.
func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQmodify folds address arithmetic
// (ADDQconst/LEAQ) into the ORQmodify addressing mode.
func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLB canonicalizes byte rotates: a rotate-left by a
// negated amount becomes a rotate-right, and constant amounts become ROLBconst.
func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLB x (NEGQ y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// result: (ROLBconst [int8(c&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// result: (ROLBconst [int8(c&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLBconst removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLBconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLL canonicalizes 32-bit rotates: rotate-left by a
// negated amount becomes RORL, and constant amounts become ROLLconst (mod 32).
func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLL x (NEGQ y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLLconst removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLLconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLQ canonicalizes 64-bit rotates: rotate-left by a
// negated amount becomes RORQ, and constant amounts become ROLQconst (mod 64).
func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLQ x (NEGQ y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLQconst removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLQconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLW canonicalizes 16-bit rotates: rotate-left by a
// negated amount becomes RORW, and constant amounts become ROLWconst (mod 16).
func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLW x (NEGQ y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLWconst removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLWconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORB canonicalizes byte rotate-rights: a negated
// amount becomes ROLB, and constant amounts become ROLBconst with the amount
// negated (rotate right by c == rotate left by (-c)&7).
func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORB x (NEGQ y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (NEGL y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// result: (ROLBconst [int8((-c)&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// result: (ROLBconst [int8((-c)&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORL canonicalizes 32-bit rotate-rights: a negated
// amount becomes ROLL, and constant amounts become ROLLconst with the amount
// negated mod 32.
func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORL x (NEGQ y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (NEGL y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORQ canonicalizes 64-bit rotate-rights: a negated
// amount becomes ROLQ, and constant amounts become ROLQconst with the amount
// negated mod 64.
func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORQ x (NEGQ y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORW canonicalizes 16-bit rotate-rights: a negated
// amount becomes ROLW, and constant amounts become ROLWconst with the amount
// negated mod 16.
func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORW x (NEGQ y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (NEGL y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARB folds constant shift amounts into SARBconst.
// The amount is clamped to 7: shifting a byte right arithmetically by more
// than 7 is the same as shifting by 7 (sign fill).
func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARB x (MOVQconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARBconst removes shifts by zero and constant-folds
// arithmetic byte shifts of constants.
func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARBconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARL simplifies 32-bit arithmetic right shifts:
// constant amounts become SARLconst; additions/maskings of the shift amount
// that cannot change amount&31 are dropped; and on GOAMD64>=3 a load feeding
// the shift is merged into SARXLload.
func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARL x (MOVQconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARLconst removes shifts by zero and constant-folds
// arithmetic 32-bit shifts of constants.
func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARLconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARQ simplifies 64-bit arithmetic right shifts:
// constant amounts become SARQconst; additions/maskings of the shift amount
// that cannot change amount&63 are dropped; and on GOAMD64>=3 a load feeding
// the shift is merged into SARXQload.
func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARQ x (MOVQconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARQconst removes shifts by zero and constant-folds
// arithmetic 64-bit shifts of constants.
func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARQconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARW folds constant shift amounts into SARWconst.
// The amount is clamped to 15: shifting a 16-bit value right arithmetically by
// more than 15 is the same as shifting by 15 (sign fill).
func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARW x (MOVQconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARWconst removes shifts by zero and constant-folds
// arithmetic 16-bit shifts of constants.
func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARWconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARXLload un-merges a SARX-with-load when the shift
// amount turns out to be constant: a plain load plus SARLconst is preferable.
func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARXQload un-merges a SARX-with-load when the shift
// amount turns out to be constant: a plain load plus SARQconst is preferable.
func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SBBLcarrymask constant-folds the 32-bit carry mask
// when the flags are statically known: -1 when the carry (unsigned less-than)
// is set, 0 otherwise.
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBLcarrymask (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SBBQ folds a 32-bit-representable constant operand
// into SBBQconst, and drops the borrow entirely when the flags are known EQ
// (borrow clear), leaving a plain SUBQborrow.
func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQ x (MOVQconst [c]) borrow)
	// cond: is32Bit(c)
	// result: (SBBQconst x [int32(c)] borrow)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		borrow := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SBBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(x, borrow)
		return true
	}
	// match: (SBBQ x y (FlagEQ))
	// result: (SUBQborrow x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQborrow)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SBBQcarrymask constant-folds the 64-bit carry mask
// when the flags are statically known: -1 when the carry (unsigned less-than)
// is set, 0 otherwise.
func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBQcarrymask (FlagEQ))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SBBQconst drops a known-clear borrow (FlagEQ),
// reducing subtract-with-borrow to SUBQconstborrow.
func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQconst x [c] (FlagEQ))
	// result: (SUBQconstborrow x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETA simplifies SETA (set if unsigned-above):
// inverted flags flip it to SETB, and statically known flags fold to a
// constant 0 or 1.
func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETAE applies generated rewrite rules to a SETAE
// (set byte if unsigned-above-or-equal) value and reports whether v was
// rewritten. Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAE (TESTQ x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTL x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTW x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTB x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (BTLconst [0] x))
	// result: (XORLconst [1] (ANDLconst <typ.Bool> [1] x))
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, typ.Bool)
		v0.AuxInt = int32ToAuxInt(1)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE (BTQconst [0] x))
	// result: (XORLconst [1] (ANDLconst <typ.Bool> [1] x))
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, typ.Bool)
		v0.AuxInt = int32ToAuxInt(1)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE c:(CMPQconst [128] x))
	// cond: c.Uses == 1
	// result: (SETA (CMPQconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE c:(CMPLconst [128] x))
	// cond: c.Uses == 1
	// result: (SETA (CMPLconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETAEstore applies generated rewrite rules to a
// SETAEstore (store the unsigned-above-or-equal condition byte) value and
// reports whether v was rewritten. Rules are tried in order.
func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETAstore applies generated rewrite rules to a
// SETAstore (store the unsigned-above condition byte) value and reports
// whether v was rewritten. Rules are tried in order.
func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETB applies generated rewrite rules to a SETB
// (set byte if unsigned-below) value and reports whether v was rewritten.
// Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB c:(CMPQconst [128] x))
	// cond: c.Uses == 1
	// result: (SETBE (CMPQconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETB c:(CMPLconst [128] x))
	// cond: c.Uses == 1
	// result: (SETBE (CMPLconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBE applies generated rewrite rules to a SETBE
// (set byte if unsigned-below-or-equal) value and reports whether v was
// rewritten. Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBEstore applies generated rewrite rules to a
// SETBEstore (store the unsigned-below-or-equal condition byte) value and
// reports whether v was rewritten. Rules are tried in order.
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBstore applies generated rewrite rules to a
// SETBstore (store the unsigned-below condition byte) value and reports
// whether v was rewritten. Rules are tried in order.
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETEQ applies generated rewrite rules to a SETEQ
// (set byte if equal) value and reports whether v was rewritten. Rules are
// tried in order; inner _i0 loops try both argument orders of commutative ops.
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETEQstore applies the AMD64 rewrite rules for
// SETEQstore (store the "equal" flag result as a byte). Rules are tried in
// order; the first match rewrites v in place and returns true.
// NOTE(review): this file looks machine-generated from rule descriptions —
// the per-rule comments below reconstruct each rule; confirm against the
// generator's rule source before relying on them.
func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	// Testing bit x of y: use a bit-test and the inverted (carry-clear) condition.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	// Testing against a single-bit constant is a bit-test of that bit's index.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	// Same as above, but the power-of-two mask arrives as a materialized constant.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	// (s&1) == 1 is the same test as (s&1) != 0.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1 == z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	// Isolating the top bit via shift round-trip is a test of bit 63.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1 == z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1 == z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	// Isolating the bottom bit via shift round-trip is a test of bit 0.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1 == z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1 == z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	// A logical right shift by 63 leaves only the sign bit: test bit 63.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1 == z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
	// Equality is unchanged when the comparison operands are swapped.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	// Fold a constant address addition into the store offset.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)
	// Flags known at compile time: store the boolean result directly.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
18996 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
18997 v_0 := v.Args[0]
18998
18999
19000 for {
19001 if v_0.Op != OpAMD64InvertFlags {
19002 break
19003 }
19004 x := v_0.Args[0]
19005 v.reset(OpAMD64SETL)
19006 v.AddArg(x)
19007 return true
19008 }
19009
19010
19011 for {
19012 if v_0.Op != OpAMD64FlagEQ {
19013 break
19014 }
19015 v.reset(OpAMD64MOVLconst)
19016 v.AuxInt = int32ToAuxInt(0)
19017 return true
19018 }
19019
19020
19021 for {
19022 if v_0.Op != OpAMD64FlagLT_ULT {
19023 break
19024 }
19025 v.reset(OpAMD64MOVLconst)
19026 v.AuxInt = int32ToAuxInt(0)
19027 return true
19028 }
19029
19030
19031 for {
19032 if v_0.Op != OpAMD64FlagLT_UGT {
19033 break
19034 }
19035 v.reset(OpAMD64MOVLconst)
19036 v.AuxInt = int32ToAuxInt(0)
19037 return true
19038 }
19039
19040
19041 for {
19042 if v_0.Op != OpAMD64FlagGT_ULT {
19043 break
19044 }
19045 v.reset(OpAMD64MOVLconst)
19046 v.AuxInt = int32ToAuxInt(1)
19047 return true
19048 }
19049
19050
19051 for {
19052 if v_0.Op != OpAMD64FlagGT_UGT {
19053 break
19054 }
19055 v.reset(OpAMD64MOVLconst)
19056 v.AuxInt = int32ToAuxInt(1)
19057 return true
19058 }
19059 return false
19060 }
19061 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
19062 v_0 := v.Args[0]
19063 b := v.Block
19064
19065
19066
19067 for {
19068 c := v_0
19069 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
19070 break
19071 }
19072 x := c.Args[0]
19073 if !(c.Uses == 1) {
19074 break
19075 }
19076 v.reset(OpAMD64SETG)
19077 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
19078 v0.AuxInt = int32ToAuxInt(127)
19079 v0.AddArg(x)
19080 v.AddArg(v0)
19081 return true
19082 }
19083
19084
19085
19086 for {
19087 c := v_0
19088 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
19089 break
19090 }
19091 x := c.Args[0]
19092 if !(c.Uses == 1) {
19093 break
19094 }
19095 v.reset(OpAMD64SETG)
19096 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
19097 v0.AuxInt = int32ToAuxInt(127)
19098 v0.AddArg(x)
19099 v.AddArg(v0)
19100 return true
19101 }
19102
19103
19104 for {
19105 if v_0.Op != OpAMD64InvertFlags {
19106 break
19107 }
19108 x := v_0.Args[0]
19109 v.reset(OpAMD64SETLE)
19110 v.AddArg(x)
19111 return true
19112 }
19113
19114
19115 for {
19116 if v_0.Op != OpAMD64FlagEQ {
19117 break
19118 }
19119 v.reset(OpAMD64MOVLconst)
19120 v.AuxInt = int32ToAuxInt(1)
19121 return true
19122 }
19123
19124
19125 for {
19126 if v_0.Op != OpAMD64FlagLT_ULT {
19127 break
19128 }
19129 v.reset(OpAMD64MOVLconst)
19130 v.AuxInt = int32ToAuxInt(0)
19131 return true
19132 }
19133
19134
19135 for {
19136 if v_0.Op != OpAMD64FlagLT_UGT {
19137 break
19138 }
19139 v.reset(OpAMD64MOVLconst)
19140 v.AuxInt = int32ToAuxInt(0)
19141 return true
19142 }
19143
19144
19145 for {
19146 if v_0.Op != OpAMD64FlagGT_ULT {
19147 break
19148 }
19149 v.reset(OpAMD64MOVLconst)
19150 v.AuxInt = int32ToAuxInt(1)
19151 return true
19152 }
19153
19154
19155 for {
19156 if v_0.Op != OpAMD64FlagGT_UGT {
19157 break
19158 }
19159 v.reset(OpAMD64MOVLconst)
19160 v.AuxInt = int32ToAuxInt(1)
19161 return true
19162 }
19163 return false
19164 }
19165 func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
19166 v_2 := v.Args[2]
19167 v_1 := v.Args[1]
19168 v_0 := v.Args[0]
19169 b := v.Block
19170 typ := &b.Func.Config.Types
19171
19172
19173 for {
19174 off := auxIntToInt32(v.AuxInt)
19175 sym := auxToSym(v.Aux)
19176 ptr := v_0
19177 if v_1.Op != OpAMD64InvertFlags {
19178 break
19179 }
19180 x := v_1.Args[0]
19181 mem := v_2
19182 v.reset(OpAMD64SETLEstore)
19183 v.AuxInt = int32ToAuxInt(off)
19184 v.Aux = symToAux(sym)
19185 v.AddArg3(ptr, x, mem)
19186 return true
19187 }
19188
19189
19190
19191 for {
19192 off1 := auxIntToInt32(v.AuxInt)
19193 sym := auxToSym(v.Aux)
19194 if v_0.Op != OpAMD64ADDQconst {
19195 break
19196 }
19197 off2 := auxIntToInt32(v_0.AuxInt)
19198 base := v_0.Args[0]
19199 val := v_1
19200 mem := v_2
19201 if !(is32Bit(int64(off1) + int64(off2))) {
19202 break
19203 }
19204 v.reset(OpAMD64SETGEstore)
19205 v.AuxInt = int32ToAuxInt(off1 + off2)
19206 v.Aux = symToAux(sym)
19207 v.AddArg3(base, val, mem)
19208 return true
19209 }
19210
19211
19212
19213 for {
19214 off1 := auxIntToInt32(v.AuxInt)
19215 sym1 := auxToSym(v.Aux)
19216 if v_0.Op != OpAMD64LEAQ {
19217 break
19218 }
19219 off2 := auxIntToInt32(v_0.AuxInt)
19220 sym2 := auxToSym(v_0.Aux)
19221 base := v_0.Args[0]
19222 val := v_1
19223 mem := v_2
19224 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19225 break
19226 }
19227 v.reset(OpAMD64SETGEstore)
19228 v.AuxInt = int32ToAuxInt(off1 + off2)
19229 v.Aux = symToAux(mergeSym(sym1, sym2))
19230 v.AddArg3(base, val, mem)
19231 return true
19232 }
19233
19234
19235 for {
19236 off := auxIntToInt32(v.AuxInt)
19237 sym := auxToSym(v.Aux)
19238 ptr := v_0
19239 if v_1.Op != OpAMD64FlagEQ {
19240 break
19241 }
19242 mem := v_2
19243 v.reset(OpAMD64MOVBstore)
19244 v.AuxInt = int32ToAuxInt(off)
19245 v.Aux = symToAux(sym)
19246 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19247 v0.AuxInt = int32ToAuxInt(1)
19248 v.AddArg3(ptr, v0, mem)
19249 return true
19250 }
19251
19252
19253 for {
19254 off := auxIntToInt32(v.AuxInt)
19255 sym := auxToSym(v.Aux)
19256 ptr := v_0
19257 if v_1.Op != OpAMD64FlagLT_ULT {
19258 break
19259 }
19260 mem := v_2
19261 v.reset(OpAMD64MOVBstore)
19262 v.AuxInt = int32ToAuxInt(off)
19263 v.Aux = symToAux(sym)
19264 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19265 v0.AuxInt = int32ToAuxInt(0)
19266 v.AddArg3(ptr, v0, mem)
19267 return true
19268 }
19269
19270
19271 for {
19272 off := auxIntToInt32(v.AuxInt)
19273 sym := auxToSym(v.Aux)
19274 ptr := v_0
19275 if v_1.Op != OpAMD64FlagLT_UGT {
19276 break
19277 }
19278 mem := v_2
19279 v.reset(OpAMD64MOVBstore)
19280 v.AuxInt = int32ToAuxInt(off)
19281 v.Aux = symToAux(sym)
19282 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19283 v0.AuxInt = int32ToAuxInt(0)
19284 v.AddArg3(ptr, v0, mem)
19285 return true
19286 }
19287
19288
19289 for {
19290 off := auxIntToInt32(v.AuxInt)
19291 sym := auxToSym(v.Aux)
19292 ptr := v_0
19293 if v_1.Op != OpAMD64FlagGT_ULT {
19294 break
19295 }
19296 mem := v_2
19297 v.reset(OpAMD64MOVBstore)
19298 v.AuxInt = int32ToAuxInt(off)
19299 v.Aux = symToAux(sym)
19300 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19301 v0.AuxInt = int32ToAuxInt(1)
19302 v.AddArg3(ptr, v0, mem)
19303 return true
19304 }
19305
19306
19307 for {
19308 off := auxIntToInt32(v.AuxInt)
19309 sym := auxToSym(v.Aux)
19310 ptr := v_0
19311 if v_1.Op != OpAMD64FlagGT_UGT {
19312 break
19313 }
19314 mem := v_2
19315 v.reset(OpAMD64MOVBstore)
19316 v.AuxInt = int32ToAuxInt(off)
19317 v.Aux = symToAux(sym)
19318 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19319 v0.AuxInt = int32ToAuxInt(1)
19320 v.AddArg3(ptr, v0, mem)
19321 return true
19322 }
19323 return false
19324 }
19325 func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
19326 v_2 := v.Args[2]
19327 v_1 := v.Args[1]
19328 v_0 := v.Args[0]
19329 b := v.Block
19330 typ := &b.Func.Config.Types
19331
19332
19333 for {
19334 off := auxIntToInt32(v.AuxInt)
19335 sym := auxToSym(v.Aux)
19336 ptr := v_0
19337 if v_1.Op != OpAMD64InvertFlags {
19338 break
19339 }
19340 x := v_1.Args[0]
19341 mem := v_2
19342 v.reset(OpAMD64SETLstore)
19343 v.AuxInt = int32ToAuxInt(off)
19344 v.Aux = symToAux(sym)
19345 v.AddArg3(ptr, x, mem)
19346 return true
19347 }
19348
19349
19350
19351 for {
19352 off1 := auxIntToInt32(v.AuxInt)
19353 sym := auxToSym(v.Aux)
19354 if v_0.Op != OpAMD64ADDQconst {
19355 break
19356 }
19357 off2 := auxIntToInt32(v_0.AuxInt)
19358 base := v_0.Args[0]
19359 val := v_1
19360 mem := v_2
19361 if !(is32Bit(int64(off1) + int64(off2))) {
19362 break
19363 }
19364 v.reset(OpAMD64SETGstore)
19365 v.AuxInt = int32ToAuxInt(off1 + off2)
19366 v.Aux = symToAux(sym)
19367 v.AddArg3(base, val, mem)
19368 return true
19369 }
19370
19371
19372
19373 for {
19374 off1 := auxIntToInt32(v.AuxInt)
19375 sym1 := auxToSym(v.Aux)
19376 if v_0.Op != OpAMD64LEAQ {
19377 break
19378 }
19379 off2 := auxIntToInt32(v_0.AuxInt)
19380 sym2 := auxToSym(v_0.Aux)
19381 base := v_0.Args[0]
19382 val := v_1
19383 mem := v_2
19384 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19385 break
19386 }
19387 v.reset(OpAMD64SETGstore)
19388 v.AuxInt = int32ToAuxInt(off1 + off2)
19389 v.Aux = symToAux(mergeSym(sym1, sym2))
19390 v.AddArg3(base, val, mem)
19391 return true
19392 }
19393
19394
19395 for {
19396 off := auxIntToInt32(v.AuxInt)
19397 sym := auxToSym(v.Aux)
19398 ptr := v_0
19399 if v_1.Op != OpAMD64FlagEQ {
19400 break
19401 }
19402 mem := v_2
19403 v.reset(OpAMD64MOVBstore)
19404 v.AuxInt = int32ToAuxInt(off)
19405 v.Aux = symToAux(sym)
19406 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19407 v0.AuxInt = int32ToAuxInt(0)
19408 v.AddArg3(ptr, v0, mem)
19409 return true
19410 }
19411
19412
19413 for {
19414 off := auxIntToInt32(v.AuxInt)
19415 sym := auxToSym(v.Aux)
19416 ptr := v_0
19417 if v_1.Op != OpAMD64FlagLT_ULT {
19418 break
19419 }
19420 mem := v_2
19421 v.reset(OpAMD64MOVBstore)
19422 v.AuxInt = int32ToAuxInt(off)
19423 v.Aux = symToAux(sym)
19424 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19425 v0.AuxInt = int32ToAuxInt(0)
19426 v.AddArg3(ptr, v0, mem)
19427 return true
19428 }
19429
19430
19431 for {
19432 off := auxIntToInt32(v.AuxInt)
19433 sym := auxToSym(v.Aux)
19434 ptr := v_0
19435 if v_1.Op != OpAMD64FlagLT_UGT {
19436 break
19437 }
19438 mem := v_2
19439 v.reset(OpAMD64MOVBstore)
19440 v.AuxInt = int32ToAuxInt(off)
19441 v.Aux = symToAux(sym)
19442 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19443 v0.AuxInt = int32ToAuxInt(0)
19444 v.AddArg3(ptr, v0, mem)
19445 return true
19446 }
19447
19448
19449 for {
19450 off := auxIntToInt32(v.AuxInt)
19451 sym := auxToSym(v.Aux)
19452 ptr := v_0
19453 if v_1.Op != OpAMD64FlagGT_ULT {
19454 break
19455 }
19456 mem := v_2
19457 v.reset(OpAMD64MOVBstore)
19458 v.AuxInt = int32ToAuxInt(off)
19459 v.Aux = symToAux(sym)
19460 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19461 v0.AuxInt = int32ToAuxInt(1)
19462 v.AddArg3(ptr, v0, mem)
19463 return true
19464 }
19465
19466
19467 for {
19468 off := auxIntToInt32(v.AuxInt)
19469 sym := auxToSym(v.Aux)
19470 ptr := v_0
19471 if v_1.Op != OpAMD64FlagGT_UGT {
19472 break
19473 }
19474 mem := v_2
19475 v.reset(OpAMD64MOVBstore)
19476 v.AuxInt = int32ToAuxInt(off)
19477 v.Aux = symToAux(sym)
19478 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19479 v0.AuxInt = int32ToAuxInt(1)
19480 v.AddArg3(ptr, v0, mem)
19481 return true
19482 }
19483 return false
19484 }
19485 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
19486 v_0 := v.Args[0]
19487 b := v.Block
19488
19489
19490
19491 for {
19492 c := v_0
19493 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
19494 break
19495 }
19496 x := c.Args[0]
19497 if !(c.Uses == 1) {
19498 break
19499 }
19500 v.reset(OpAMD64SETLE)
19501 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
19502 v0.AuxInt = int32ToAuxInt(127)
19503 v0.AddArg(x)
19504 v.AddArg(v0)
19505 return true
19506 }
19507
19508
19509
19510 for {
19511 c := v_0
19512 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
19513 break
19514 }
19515 x := c.Args[0]
19516 if !(c.Uses == 1) {
19517 break
19518 }
19519 v.reset(OpAMD64SETLE)
19520 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
19521 v0.AuxInt = int32ToAuxInt(127)
19522 v0.AddArg(x)
19523 v.AddArg(v0)
19524 return true
19525 }
19526
19527
19528 for {
19529 if v_0.Op != OpAMD64InvertFlags {
19530 break
19531 }
19532 x := v_0.Args[0]
19533 v.reset(OpAMD64SETG)
19534 v.AddArg(x)
19535 return true
19536 }
19537
19538
19539 for {
19540 if v_0.Op != OpAMD64FlagEQ {
19541 break
19542 }
19543 v.reset(OpAMD64MOVLconst)
19544 v.AuxInt = int32ToAuxInt(0)
19545 return true
19546 }
19547
19548
19549 for {
19550 if v_0.Op != OpAMD64FlagLT_ULT {
19551 break
19552 }
19553 v.reset(OpAMD64MOVLconst)
19554 v.AuxInt = int32ToAuxInt(1)
19555 return true
19556 }
19557
19558
19559 for {
19560 if v_0.Op != OpAMD64FlagLT_UGT {
19561 break
19562 }
19563 v.reset(OpAMD64MOVLconst)
19564 v.AuxInt = int32ToAuxInt(1)
19565 return true
19566 }
19567
19568
19569 for {
19570 if v_0.Op != OpAMD64FlagGT_ULT {
19571 break
19572 }
19573 v.reset(OpAMD64MOVLconst)
19574 v.AuxInt = int32ToAuxInt(0)
19575 return true
19576 }
19577
19578
19579 for {
19580 if v_0.Op != OpAMD64FlagGT_UGT {
19581 break
19582 }
19583 v.reset(OpAMD64MOVLconst)
19584 v.AuxInt = int32ToAuxInt(0)
19585 return true
19586 }
19587 return false
19588 }
19589 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
19590 v_0 := v.Args[0]
19591
19592
19593 for {
19594 if v_0.Op != OpAMD64InvertFlags {
19595 break
19596 }
19597 x := v_0.Args[0]
19598 v.reset(OpAMD64SETGE)
19599 v.AddArg(x)
19600 return true
19601 }
19602
19603
19604 for {
19605 if v_0.Op != OpAMD64FlagEQ {
19606 break
19607 }
19608 v.reset(OpAMD64MOVLconst)
19609 v.AuxInt = int32ToAuxInt(1)
19610 return true
19611 }
19612
19613
19614 for {
19615 if v_0.Op != OpAMD64FlagLT_ULT {
19616 break
19617 }
19618 v.reset(OpAMD64MOVLconst)
19619 v.AuxInt = int32ToAuxInt(1)
19620 return true
19621 }
19622
19623
19624 for {
19625 if v_0.Op != OpAMD64FlagLT_UGT {
19626 break
19627 }
19628 v.reset(OpAMD64MOVLconst)
19629 v.AuxInt = int32ToAuxInt(1)
19630 return true
19631 }
19632
19633
19634 for {
19635 if v_0.Op != OpAMD64FlagGT_ULT {
19636 break
19637 }
19638 v.reset(OpAMD64MOVLconst)
19639 v.AuxInt = int32ToAuxInt(0)
19640 return true
19641 }
19642
19643
19644 for {
19645 if v_0.Op != OpAMD64FlagGT_UGT {
19646 break
19647 }
19648 v.reset(OpAMD64MOVLconst)
19649 v.AuxInt = int32ToAuxInt(0)
19650 return true
19651 }
19652 return false
19653 }
19654 func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
19655 v_2 := v.Args[2]
19656 v_1 := v.Args[1]
19657 v_0 := v.Args[0]
19658 b := v.Block
19659 typ := &b.Func.Config.Types
19660
19661
19662 for {
19663 off := auxIntToInt32(v.AuxInt)
19664 sym := auxToSym(v.Aux)
19665 ptr := v_0
19666 if v_1.Op != OpAMD64InvertFlags {
19667 break
19668 }
19669 x := v_1.Args[0]
19670 mem := v_2
19671 v.reset(OpAMD64SETGEstore)
19672 v.AuxInt = int32ToAuxInt(off)
19673 v.Aux = symToAux(sym)
19674 v.AddArg3(ptr, x, mem)
19675 return true
19676 }
19677
19678
19679
19680 for {
19681 off1 := auxIntToInt32(v.AuxInt)
19682 sym := auxToSym(v.Aux)
19683 if v_0.Op != OpAMD64ADDQconst {
19684 break
19685 }
19686 off2 := auxIntToInt32(v_0.AuxInt)
19687 base := v_0.Args[0]
19688 val := v_1
19689 mem := v_2
19690 if !(is32Bit(int64(off1) + int64(off2))) {
19691 break
19692 }
19693 v.reset(OpAMD64SETLEstore)
19694 v.AuxInt = int32ToAuxInt(off1 + off2)
19695 v.Aux = symToAux(sym)
19696 v.AddArg3(base, val, mem)
19697 return true
19698 }
19699
19700
19701
19702 for {
19703 off1 := auxIntToInt32(v.AuxInt)
19704 sym1 := auxToSym(v.Aux)
19705 if v_0.Op != OpAMD64LEAQ {
19706 break
19707 }
19708 off2 := auxIntToInt32(v_0.AuxInt)
19709 sym2 := auxToSym(v_0.Aux)
19710 base := v_0.Args[0]
19711 val := v_1
19712 mem := v_2
19713 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
19714 break
19715 }
19716 v.reset(OpAMD64SETLEstore)
19717 v.AuxInt = int32ToAuxInt(off1 + off2)
19718 v.Aux = symToAux(mergeSym(sym1, sym2))
19719 v.AddArg3(base, val, mem)
19720 return true
19721 }
19722
19723
19724 for {
19725 off := auxIntToInt32(v.AuxInt)
19726 sym := auxToSym(v.Aux)
19727 ptr := v_0
19728 if v_1.Op != OpAMD64FlagEQ {
19729 break
19730 }
19731 mem := v_2
19732 v.reset(OpAMD64MOVBstore)
19733 v.AuxInt = int32ToAuxInt(off)
19734 v.Aux = symToAux(sym)
19735 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19736 v0.AuxInt = int32ToAuxInt(1)
19737 v.AddArg3(ptr, v0, mem)
19738 return true
19739 }
19740
19741
19742 for {
19743 off := auxIntToInt32(v.AuxInt)
19744 sym := auxToSym(v.Aux)
19745 ptr := v_0
19746 if v_1.Op != OpAMD64FlagLT_ULT {
19747 break
19748 }
19749 mem := v_2
19750 v.reset(OpAMD64MOVBstore)
19751 v.AuxInt = int32ToAuxInt(off)
19752 v.Aux = symToAux(sym)
19753 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19754 v0.AuxInt = int32ToAuxInt(1)
19755 v.AddArg3(ptr, v0, mem)
19756 return true
19757 }
19758
19759
19760 for {
19761 off := auxIntToInt32(v.AuxInt)
19762 sym := auxToSym(v.Aux)
19763 ptr := v_0
19764 if v_1.Op != OpAMD64FlagLT_UGT {
19765 break
19766 }
19767 mem := v_2
19768 v.reset(OpAMD64MOVBstore)
19769 v.AuxInt = int32ToAuxInt(off)
19770 v.Aux = symToAux(sym)
19771 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19772 v0.AuxInt = int32ToAuxInt(1)
19773 v.AddArg3(ptr, v0, mem)
19774 return true
19775 }
19776
19777
19778 for {
19779 off := auxIntToInt32(v.AuxInt)
19780 sym := auxToSym(v.Aux)
19781 ptr := v_0
19782 if v_1.Op != OpAMD64FlagGT_ULT {
19783 break
19784 }
19785 mem := v_2
19786 v.reset(OpAMD64MOVBstore)
19787 v.AuxInt = int32ToAuxInt(off)
19788 v.Aux = symToAux(sym)
19789 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19790 v0.AuxInt = int32ToAuxInt(0)
19791 v.AddArg3(ptr, v0, mem)
19792 return true
19793 }
19794
19795
19796 for {
19797 off := auxIntToInt32(v.AuxInt)
19798 sym := auxToSym(v.Aux)
19799 ptr := v_0
19800 if v_1.Op != OpAMD64FlagGT_UGT {
19801 break
19802 }
19803 mem := v_2
19804 v.reset(OpAMD64MOVBstore)
19805 v.AuxInt = int32ToAuxInt(off)
19806 v.Aux = symToAux(sym)
19807 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
19808 v0.AuxInt = int32ToAuxInt(0)
19809 v.AddArg3(ptr, v0, mem)
19810 return true
19811 }
19812 return false
19813 }
// rewriteValueAMD64_OpAMD64SETLstore applies rewrite rules to a SETLstore
// (store the signed-less-than condition byte) value. It reports whether v
// was rewritten.
func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETGstore [off] {sym} ptr x mem)
	// Inverting the flag args flips "less than" into "greater than".
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETLstore [off1+off2] {sym} base val mem)
	// Fold a constant address addition into the store offset.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold an LEAQ address computation into the store's offset/symbol.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	// Flags are statically known: equal means "less than" is false.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	// Signed less-than is true for both LT flag states.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	// Signed greater-than means "less than" is false.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNE applies rewrite rules to a SETNE
// (set byte if not equal) value. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETNE (TESTBconst [1] x))
	// result: (ANDLconst [1] x)
	// Testing the low bit and setting a 0/1 byte is just masking the low bit.
	for {
		if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTWconst [1] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETB (BTL x y))
	// Testing against 1<<x is a bit-test of bit x; SETB reads the carry
	// flag that BTL sets to the tested bit.
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		// TESTL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETB (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETB (BTLconst [int8(log32(c))] x))
	// Testing a single-bit constant mask is a bit-test of that bit.
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETB (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETB (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETEQ (CMPLconst [0] s))
	// s is 0 or 1, so s != 1 is equivalent to s == 0.
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETEQ (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	// SHR then SHL by 63 isolates the sign bit, so this tests bit 63 of x.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [0] x))
	// SHL then SHR by 63 isolates bit 0, so this tests bit 0 of x.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	// x>>63 is nonzero iff the sign bit of x is set.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (InvertFlags x))
	// result: (SETNE x)
	// "not equal" is symmetric, so inverted flags can be dropped.
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETNE (FlagLT_ULT))
	// result: (MOVLconst [1])
	// Any non-EQ flag state means "not equal" is true.
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETNE (Select1 <types.TypeFlags> blsr))
	// BLSRQ already sets flags on its result; reuse them instead of
	// re-testing the Select0 value.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETNE (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNEstore applies rewrite rules to a SETNEstore
// (store the not-equal condition byte) value. It reports whether v was
// rewritten. The rules mirror those of SETNE, with the result stored to
// memory via SETBstore/SETEQstore/MOVBstore.
func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	// Testing against 1<<x is a bit-test of bit x.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	// A single-bit constant mask becomes a constant bit-test.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
	// s is 0 or 1, so s != 1 is equivalent to s == 0.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	// SHR then SHL by 63 isolates the sign bit, so this tests bit 63 of x.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	// SHL then SHR by 63 isolates bit 0, so this tests bit 0 of x.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	// x>>63 is nonzero iff the sign bit of x is set.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETNEstore [off] {sym} ptr x mem)
	// "not equal" is symmetric, so inverted flags can be dropped.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETNEstore [off1+off2] {sym} base val mem)
	// Fold a constant address addition into the store offset.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold an LEAQ address computation into the store's offset/symbol.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	// Flags are statically known: equal means "not equal" is false.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	// Any non-EQ flag state means "not equal" is true.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLL applies rewrite rules to a SHLL
// (32-bit shift left) value. It reports whether v was rewritten.
// The hardware masks the shift amount to 5 bits, which is what makes
// the c&31 simplifications below valid.
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLL x (MOVQconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	// Constant shift amount: fold into SHLLconst, masked to 5 bits.
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	// Adding a multiple of 32 to the shift amount doesn't change the
	// masked amount.
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	// Masking with all shift bits set is a no-op under the hardware mask.
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	// 32-bit variants of the amount simplifications above.
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXLload [off] {sym} ptr x mem)
	// With BMI2 available (GOAMD64>=3), merge the load into a SHLX
	// load-operate instruction.
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLLconst strength-reduces a 32-bit
// shift-left-by-constant; it reports whether any rewrite fired.
// Rules are tried in order; the first match wins.
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLLconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLLconst [1] x)
	// result: (ADDL x x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.reset(OpAMD64ADDL)
		v.AddArg2(x, x)
		return true
	}
	// match: (SHLLconst [c] (ADDL x x))
	// result: (SHLLconst [c+1] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(c + 1)
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst [d] (MOVLconst [c]))
	// result: (MOVLconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c << uint64(d))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLQ simplifies a 64-bit variable shift-left:
// constant shift amounts become SHLQconst, redundant masking/offsetting of
// the shift count (mod 64) is dropped, and on GOAMD64>=3 a shifted load is
// fused into SHLXQload. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLQ x (MOVQconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHLXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLQconst strength-reduces a 64-bit
// shift-left-by-constant and constant-folds shifts of constants.
// Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLQconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLQconst [1] x)
	// result: (ADDQ x x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (SHLQconst [c] (ADDQ x x))
	// result: (SHLQconst [c+1] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(c + 1)
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst [d] (MOVQconst [c]))
	// result: (MOVQconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}
	// match: (SHLQconst [d] (MOVLconst [c]))
	// result: (MOVQconst [int64(c) << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLXLload un-fuses a load-and-shift when the
// shift amount turns out to be constant: a plain load followed by
// SHLLconst is cheaper than SHLX with a register count.
// Reports whether the rewrite fired.
func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLLconst [int8(c&31)] (MOVLload <typ.UInt32> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLXQload un-fuses a 64-bit load-and-shift when
// the shift amount turns out to be a constant (MOVQconst or MOVLconst):
// a plain load followed by SHLQconst is cheaper than SHLX with a register
// count. Reports whether a rewrite fired.
func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHLQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRB simplifies an 8-bit logical shift-right by
// a constant: in-range counts (after the hardware's mod-32 masking) become
// SHRBconst; counts >= 8 shift out every bit and fold to constant zero.
// Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRBconst removes a no-op 8-bit shift-right by
// zero. Reports whether the rewrite fired.
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRBconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRL simplifies a 32-bit variable logical
// shift-right: constant shift amounts become SHRLconst, redundant
// masking/offsetting of the shift count (mod 32) is dropped, and on
// GOAMD64>=3 a shifted load is fused into SHRXLload.
// Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRL x (MOVQconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRLconst simplifies a 32-bit logical
// shift-right-by-constant: (x+x)>>1 keeps only the low 31 bits, and a
// shift by zero is a no-op. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRLconst [1] (ADDL x x))
	// result: (ANDLconst [0x7fffffff] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRQ simplifies a 64-bit variable logical
// shift-right: constant shift amounts become SHRQconst, redundant
// masking/offsetting of the shift count (mod 64) is dropped, and on
// GOAMD64>=3 a shifted load is fused into SHRXQload.
// Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRQ x (MOVQconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SHRXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRQconst simplifies a 64-bit logical
// shift-right-by-constant: (x+x)>>1 only clears the top bit (BTRQconst
// [63]), and a shift by zero is a no-op. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRQconst [1] (ADDQ x x))
	// result: (BTRQconst [63] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(63)
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRW simplifies a 16-bit logical shift-right by
// a constant: in-range counts (after the hardware's mod-32 masking) become
// SHRWconst; counts >= 16 shift out every bit and fold to constant zero.
// Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRWconst removes a no-op 16-bit shift-right by
// zero. Reports whether the rewrite fired.
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRWconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRXLload un-fuses a load-and-shift when the
// shift amount turns out to be constant: a plain load followed by
// SHRLconst is cheaper than SHRX with a register count.
// Reports whether the rewrite fired.
func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRLconst [int8(c&31)] (MOVLload <typ.UInt32> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHRXQload un-fuses a 64-bit load-and-shift when
// the shift amount turns out to be a constant (MOVQconst or MOVLconst):
// a plain load followed by SHRQconst is cheaper than SHRX with a register
// count. Reports whether a rewrite fired.
func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SHRQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBL simplifies a 32-bit subtraction: fold a
// constant operand into SUBLconst (negating via NEGL when the constant is
// on the left), cancel x-x to zero, and merge a loaded right operand into
// SUBLload. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBL x (MOVLconst [c]))
	// result: (SUBLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBLconst canonicalizes a 32-bit
// subtract-by-constant: subtracting zero is a no-op, and everything else
// becomes an add of the negated constant (32-bit wraparound makes -c safe
// for all c). One of the two rules always fires, so this always returns true.
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// result: (ADDLconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpAMD64SUBLload optimizes a subtract-from-memory:
// fold an ADDQconst or LEAQ address computation into the load offset/sym,
// and forward a just-stored SSE value (MOVSSstore at the same address)
// straight into a register SUBL via MOVLf2i, skipping the memory
// round-trip. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (SUBL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBLmodify folds an ADDQconst or LEAQ address
// computation on the base pointer of a read-modify-write subtract into the
// instruction's offset/symbol. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQ simplifies a 64-bit subtraction: fold a
// 32-bit-representable constant operand into SUBQconst (negating via NEGQ
// when the constant is on the left), cancel x-x to zero, and merge a
// loaded right operand into SUBQload. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQborrow folds a 32-bit-representable
// constant right operand of a borrow-producing subtract into
// SUBQconstborrow. Reports whether the rewrite fired.
func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQconst simplifies a 64-bit
// subtract-by-constant: drop a subtract of zero, prefer ADDQconst of the
// negated constant (except for math.MinInt32, whose negation overflows
// int32), fold into a constant operand, and combine with a nested
// SUBQconst. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d-int64(c)])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d - int64(c))
		return true
	}
	// match: (SUBQconst [c] (SUBQconst [d] x))
	// cond: is32Bit(int64(-c)-int64(d))
	// result: (ADDQconst [-c-d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(-c) - int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQload optimizes a 64-bit subtract-from-memory:
// fold an ADDQconst or LEAQ address computation into the load offset/sym,
// and forward a just-stored SSE value (MOVSDstore at the same address)
// straight into a register SUBQ via MOVQf2i, skipping the memory
// round-trip. Reports whether any rewrite fired.
func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBQmodify applies generated rewrite rules for SUBQmodify;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSD applies generated rewrite rules for SUBSD;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSDload applies generated rewrite rules for SUBSDload;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSS applies generated rewrite rules for SUBSS;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SUBSSload applies generated rewrite rules for SUBSSload;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTB applies generated rewrite rules for TESTB;
// it reports whether a rule fired. The inner _i0 loops try both operand orders
// (TESTB is commutative).
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTB (MOVLconst [c]) x)
	// result: (TESTBconst [int8(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTBconst)
			v.AuxInt = int8ToAuxInt(int8(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVBload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTBconst applies generated rewrite rules for TESTBconst;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTL applies generated rewrite rules for TESTL;
// it reports whether a rule fired. The inner _i0 loops try both operand orders
// (TESTL is commutative).
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTL (MOVLconst [c]) x)
	// result: (TESTLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTL (MOVLload [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDLload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTL)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTLconst applies generated rewrite rules for TESTLconst;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQ applies generated rewrite rules for TESTQ;
// it reports whether a rule fired. The inner _i0 loops try both operand orders
// (TESTQ is commutative).
func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64TESTQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTQ (MOVQload [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDQload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTQ)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQconst applies generated rewrite rules for TESTQconst;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTW applies generated rewrite rules for TESTW;
// it reports whether a rule fired. The inner _i0 loops try both operand orders
// (TESTW is commutative).
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTW (MOVLconst [c]) x)
	// result: (TESTWconst [int16(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTWconst)
			v.AuxInt = int16ToAuxInt(int16(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVWload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTWconst applies generated rewrite rules for TESTWconst;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XADDLlock applies generated rewrite rules for XADDLlock;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XADDQlock applies generated rewrite rules for XADDQlock;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGL applies generated rewrite rules for XCHGL;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGQ applies generated rewrite rules for XCHGQ;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORL applies generated rewrite rules for XORL;
// it reports whether a rule fired. The inner _i0 loops try both operand orders
// (XORL is commutative).
func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTCL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORL x (MOVLconst [c]))
	// result: (XORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64XORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORL x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKL)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconst applies generated rewrite rules for XORLconst;
// it reports whether a rule fired. XOR-with-1 of a SET* flag result inverts the
// condition, so the first ten rules flip the SET op to its logical complement.
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORLconst [1] (SETNE x))
	// result: (SETEQ x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// result: (SETNE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// result: (SETGE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// result: (SETL x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// result: (SETG x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// result: (SETLE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// result: (SETAE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// result: (SETB x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// result: (SETA x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// result: (SETBE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (XORLconst [d] x))
	// result: (XORLconst [c^d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: c == 0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconstmodify applies generated rewrite rules for
// XORLconstmodify; it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLload applies generated rewrite rules for XORLload;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (XORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLmodify applies generated rewrite rules for XORLmodify;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQ applies generated rewrite rules for XORQ;
// it reports whether a rule fired. The inner _i0 loops try both operand orders
// (XORQ is commutative).
func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTCQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
	// result: (BTCQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTCQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64XORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORQ x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKQ)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQconst applies generated rewrite rules for XORQconst;
// it reports whether a rule fired.
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORQconst [c] (XORQconst [d] x))
	// result: (XORQconst [c^d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)^d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) ^ d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQconstmodify folds address arithmetic
// (ADDQconst / LEAQ) into the addressing mode of XORQconstmodify.
func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQload folds address arithmetic into XORQload,
// and folds a reload of a just-stored float value into a direct bit move.
func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (XORQ x (MOVQf2i y))
	// The load reads back the float bits just stored at the same address, so
	// use the stored value's raw bits directly instead of going through memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64XORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQmodify folds address arithmetic
// (ADDQconst / LEAQ) into the addressing mode of XORQmodify.
func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
24411 func rewriteValueAMD64_OpAddr(v *Value) bool {
24412 v_0 := v.Args[0]
24413
24414
24415 for {
24416 sym := auxToSym(v.Aux)
24417 base := v_0
24418 v.reset(OpAMD64LEAQ)
24419 v.Aux = symToAux(sym)
24420 v.AddArg(base)
24421 return true
24422 }
24423 }
24424 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
24425 v_2 := v.Args[2]
24426 v_1 := v.Args[1]
24427 v_0 := v.Args[0]
24428 b := v.Block
24429 typ := &b.Func.Config.Types
24430
24431
24432 for {
24433 ptr := v_0
24434 val := v_1
24435 mem := v_2
24436 v.reset(OpAMD64AddTupleFirst32)
24437 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
24438 v0.AddArg3(val, ptr, mem)
24439 v.AddArg2(val, v0)
24440 return true
24441 }
24442 }
24443 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
24444 v_2 := v.Args[2]
24445 v_1 := v.Args[1]
24446 v_0 := v.Args[0]
24447 b := v.Block
24448 typ := &b.Func.Config.Types
24449
24450
24451 for {
24452 ptr := v_0
24453 val := v_1
24454 mem := v_2
24455 v.reset(OpAMD64AddTupleFirst64)
24456 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
24457 v0.AddArg3(val, ptr, mem)
24458 v.AddArg2(val, v0)
24459 return true
24460 }
24461 }
24462 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
24463 v_2 := v.Args[2]
24464 v_1 := v.Args[1]
24465 v_0 := v.Args[0]
24466
24467
24468 for {
24469 ptr := v_0
24470 val := v_1
24471 mem := v_2
24472 v.reset(OpAMD64ANDLlock)
24473 v.AddArg3(ptr, val, mem)
24474 return true
24475 }
24476 }
24477 func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool {
24478 v_2 := v.Args[2]
24479 v_1 := v.Args[1]
24480 v_0 := v.Args[0]
24481
24482
24483 for {
24484 ptr := v_0
24485 val := v_1
24486 mem := v_2
24487 v.reset(OpAMD64LoweredAtomicAnd32)
24488 v.AddArg3(ptr, val, mem)
24489 return true
24490 }
24491 }
24492 func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool {
24493 v_2 := v.Args[2]
24494 v_1 := v.Args[1]
24495 v_0 := v.Args[0]
24496
24497
24498 for {
24499 ptr := v_0
24500 val := v_1
24501 mem := v_2
24502 v.reset(OpAMD64LoweredAtomicAnd64)
24503 v.AddArg3(ptr, val, mem)
24504 return true
24505 }
24506 }
24507 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
24508 v_2 := v.Args[2]
24509 v_1 := v.Args[1]
24510 v_0 := v.Args[0]
24511
24512
24513 for {
24514 ptr := v_0
24515 val := v_1
24516 mem := v_2
24517 v.reset(OpAMD64ANDBlock)
24518 v.AddArg3(ptr, val, mem)
24519 return true
24520 }
24521 }
24522 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
24523 v_3 := v.Args[3]
24524 v_2 := v.Args[2]
24525 v_1 := v.Args[1]
24526 v_0 := v.Args[0]
24527
24528
24529 for {
24530 ptr := v_0
24531 old := v_1
24532 new_ := v_2
24533 mem := v_3
24534 v.reset(OpAMD64CMPXCHGLlock)
24535 v.AddArg4(ptr, old, new_, mem)
24536 return true
24537 }
24538 }
24539 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
24540 v_3 := v.Args[3]
24541 v_2 := v.Args[2]
24542 v_1 := v.Args[1]
24543 v_0 := v.Args[0]
24544
24545
24546 for {
24547 ptr := v_0
24548 old := v_1
24549 new_ := v_2
24550 mem := v_3
24551 v.reset(OpAMD64CMPXCHGQlock)
24552 v.AddArg4(ptr, old, new_, mem)
24553 return true
24554 }
24555 }
24556 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
24557 v_2 := v.Args[2]
24558 v_1 := v.Args[1]
24559 v_0 := v.Args[0]
24560
24561
24562 for {
24563 ptr := v_0
24564 val := v_1
24565 mem := v_2
24566 v.reset(OpAMD64XCHGL)
24567 v.AddArg3(val, ptr, mem)
24568 return true
24569 }
24570 }
24571 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
24572 v_2 := v.Args[2]
24573 v_1 := v.Args[1]
24574 v_0 := v.Args[0]
24575
24576
24577 for {
24578 ptr := v_0
24579 val := v_1
24580 mem := v_2
24581 v.reset(OpAMD64XCHGQ)
24582 v.AddArg3(val, ptr, mem)
24583 return true
24584 }
24585 }
24586 func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool {
24587 v_2 := v.Args[2]
24588 v_1 := v.Args[1]
24589 v_0 := v.Args[0]
24590
24591
24592 for {
24593 ptr := v_0
24594 val := v_1
24595 mem := v_2
24596 v.reset(OpAMD64XCHGB)
24597 v.AddArg3(val, ptr, mem)
24598 return true
24599 }
24600 }
24601 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
24602 v_1 := v.Args[1]
24603 v_0 := v.Args[0]
24604
24605
24606 for {
24607 ptr := v_0
24608 mem := v_1
24609 v.reset(OpAMD64MOVLatomicload)
24610 v.AddArg2(ptr, mem)
24611 return true
24612 }
24613 }
24614 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
24615 v_1 := v.Args[1]
24616 v_0 := v.Args[0]
24617
24618
24619 for {
24620 ptr := v_0
24621 mem := v_1
24622 v.reset(OpAMD64MOVQatomicload)
24623 v.AddArg2(ptr, mem)
24624 return true
24625 }
24626 }
24627 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
24628 v_1 := v.Args[1]
24629 v_0 := v.Args[0]
24630
24631
24632 for {
24633 ptr := v_0
24634 mem := v_1
24635 v.reset(OpAMD64MOVBatomicload)
24636 v.AddArg2(ptr, mem)
24637 return true
24638 }
24639 }
24640 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
24641 v_1 := v.Args[1]
24642 v_0 := v.Args[0]
24643
24644
24645 for {
24646 ptr := v_0
24647 mem := v_1
24648 v.reset(OpAMD64MOVQatomicload)
24649 v.AddArg2(ptr, mem)
24650 return true
24651 }
24652 }
24653 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
24654 v_2 := v.Args[2]
24655 v_1 := v.Args[1]
24656 v_0 := v.Args[0]
24657
24658
24659 for {
24660 ptr := v_0
24661 val := v_1
24662 mem := v_2
24663 v.reset(OpAMD64ORLlock)
24664 v.AddArg3(ptr, val, mem)
24665 return true
24666 }
24667 }
24668 func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool {
24669 v_2 := v.Args[2]
24670 v_1 := v.Args[1]
24671 v_0 := v.Args[0]
24672
24673
24674 for {
24675 ptr := v_0
24676 val := v_1
24677 mem := v_2
24678 v.reset(OpAMD64LoweredAtomicOr32)
24679 v.AddArg3(ptr, val, mem)
24680 return true
24681 }
24682 }
24683 func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool {
24684 v_2 := v.Args[2]
24685 v_1 := v.Args[1]
24686 v_0 := v.Args[0]
24687
24688
24689 for {
24690 ptr := v_0
24691 val := v_1
24692 mem := v_2
24693 v.reset(OpAMD64LoweredAtomicOr64)
24694 v.AddArg3(ptr, val, mem)
24695 return true
24696 }
24697 }
24698 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
24699 v_2 := v.Args[2]
24700 v_1 := v.Args[1]
24701 v_0 := v.Args[0]
24702
24703
24704 for {
24705 ptr := v_0
24706 val := v_1
24707 mem := v_2
24708 v.reset(OpAMD64ORBlock)
24709 v.AddArg3(ptr, val, mem)
24710 return true
24711 }
24712 }
24713 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
24714 v_2 := v.Args[2]
24715 v_1 := v.Args[1]
24716 v_0 := v.Args[0]
24717 b := v.Block
24718 typ := &b.Func.Config.Types
24719
24720
24721 for {
24722 ptr := v_0
24723 val := v_1
24724 mem := v_2
24725 v.reset(OpSelect1)
24726 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
24727 v0.AddArg3(val, ptr, mem)
24728 v.AddArg(v0)
24729 return true
24730 }
24731 }
24732 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
24733 v_2 := v.Args[2]
24734 v_1 := v.Args[1]
24735 v_0 := v.Args[0]
24736 b := v.Block
24737 typ := &b.Func.Config.Types
24738
24739
24740 for {
24741 ptr := v_0
24742 val := v_1
24743 mem := v_2
24744 v.reset(OpSelect1)
24745 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
24746 v0.AddArg3(val, ptr, mem)
24747 v.AddArg(v0)
24748 return true
24749 }
24750 }
24751 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
24752 v_2 := v.Args[2]
24753 v_1 := v.Args[1]
24754 v_0 := v.Args[0]
24755 b := v.Block
24756 typ := &b.Func.Config.Types
24757
24758
24759 for {
24760 ptr := v_0
24761 val := v_1
24762 mem := v_2
24763 v.reset(OpSelect1)
24764 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
24765 v0.AddArg3(val, ptr, mem)
24766 v.AddArg(v0)
24767 return true
24768 }
24769 }
24770 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
24771 v_2 := v.Args[2]
24772 v_1 := v.Args[1]
24773 v_0 := v.Args[0]
24774 b := v.Block
24775 typ := &b.Func.Config.Types
24776
24777
24778 for {
24779 ptr := v_0
24780 val := v_1
24781 mem := v_2
24782 v.reset(OpSelect1)
24783 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
24784 v0.AddArg3(val, ptr, mem)
24785 v.AddArg(v0)
24786 return true
24787 }
24788 }
// rewriteValueAMD64_OpBitLen16 lowers BitLen16, choosing between BSR and
// LZCNT based on the GOAMD64 microarchitecture level.
func rewriteValueAMD64_OpBitLen16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen16 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen16 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpBitLen32 lowers BitLen32, choosing between BSR and
// LZCNT based on the GOAMD64 microarchitecture level.
func rewriteValueAMD64_OpBitLen32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
		v1.AuxInt = int32ToAuxInt(1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v2.AddArg(x)
		v1.AddArg2(v2, v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen32 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpBitLen64 lowers BitLen64. Below GOAMD64 v3 the BSR
// result is undefined for a zero input, so a CMOVQEQ on the BSR flags
// substitutes -1 before the +1 adjustment; at v3+ LZCNT handles zero directly.
func rewriteValueAMD64_OpBitLen64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = int64ToAuxInt(-1)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4.AddArg(v2)
		v0.AddArg3(v1, v3, v4)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen64 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-64)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpBitLen8 lowers BitLen8, choosing between BSR and
// LZCNT based on the GOAMD64 microarchitecture level.
func rewriteValueAMD64_OpBitLen8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen8 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
	// match: (BitLen8 <t> x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
		v0.AuxInt = int32ToAuxInt(-32)
		v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
24966 func rewriteValueAMD64_OpBswap16(v *Value) bool {
24967 v_0 := v.Args[0]
24968
24969
24970 for {
24971 x := v_0
24972 v.reset(OpAMD64ROLWconst)
24973 v.AuxInt = int8ToAuxInt(8)
24974 v.AddArg(x)
24975 return true
24976 }
24977 }
24978 func rewriteValueAMD64_OpCeil(v *Value) bool {
24979 v_0 := v.Args[0]
24980
24981
24982 for {
24983 x := v_0
24984 v.reset(OpAMD64ROUNDSD)
24985 v.AuxInt = int8ToAuxInt(2)
24986 v.AddArg(x)
24987 return true
24988 }
24989 }
24990 func rewriteValueAMD64_OpCondSelect(v *Value) bool {
24991 v_2 := v.Args[2]
24992 v_1 := v.Args[1]
24993 v_0 := v.Args[0]
24994 b := v.Block
24995 typ := &b.Func.Config.Types
24996
24997
24998
24999 for {
25000 t := v.Type
25001 x := v_0
25002 y := v_1
25003 if v_2.Op != OpAMD64SETEQ {
25004 break
25005 }
25006 cond := v_2.Args[0]
25007 if !(is64BitInt(t) || isPtr(t)) {
25008 break
25009 }
25010 v.reset(OpAMD64CMOVQEQ)
25011 v.AddArg3(y, x, cond)
25012 return true
25013 }
25014
25015
25016
25017 for {
25018 t := v.Type
25019 x := v_0
25020 y := v_1
25021 if v_2.Op != OpAMD64SETNE {
25022 break
25023 }
25024 cond := v_2.Args[0]
25025 if !(is64BitInt(t) || isPtr(t)) {
25026 break
25027 }
25028 v.reset(OpAMD64CMOVQNE)
25029 v.AddArg3(y, x, cond)
25030 return true
25031 }
25032
25033
25034
25035 for {
25036 t := v.Type
25037 x := v_0
25038 y := v_1
25039 if v_2.Op != OpAMD64SETL {
25040 break
25041 }
25042 cond := v_2.Args[0]
25043 if !(is64BitInt(t) || isPtr(t)) {
25044 break
25045 }
25046 v.reset(OpAMD64CMOVQLT)
25047 v.AddArg3(y, x, cond)
25048 return true
25049 }
25050
25051
25052
25053 for {
25054 t := v.Type
25055 x := v_0
25056 y := v_1
25057 if v_2.Op != OpAMD64SETG {
25058 break
25059 }
25060 cond := v_2.Args[0]
25061 if !(is64BitInt(t) || isPtr(t)) {
25062 break
25063 }
25064 v.reset(OpAMD64CMOVQGT)
25065 v.AddArg3(y, x, cond)
25066 return true
25067 }
25068
25069
25070
25071 for {
25072 t := v.Type
25073 x := v_0
25074 y := v_1
25075 if v_2.Op != OpAMD64SETLE {
25076 break
25077 }
25078 cond := v_2.Args[0]
25079 if !(is64BitInt(t) || isPtr(t)) {
25080 break
25081 }
25082 v.reset(OpAMD64CMOVQLE)
25083 v.AddArg3(y, x, cond)
25084 return true
25085 }
25086
25087
25088
25089 for {
25090 t := v.Type
25091 x := v_0
25092 y := v_1
25093 if v_2.Op != OpAMD64SETGE {
25094 break
25095 }
25096 cond := v_2.Args[0]
25097 if !(is64BitInt(t) || isPtr(t)) {
25098 break
25099 }
25100 v.reset(OpAMD64CMOVQGE)
25101 v.AddArg3(y, x, cond)
25102 return true
25103 }
25104
25105
25106
25107 for {
25108 t := v.Type
25109 x := v_0
25110 y := v_1
25111 if v_2.Op != OpAMD64SETA {
25112 break
25113 }
25114 cond := v_2.Args[0]
25115 if !(is64BitInt(t) || isPtr(t)) {
25116 break
25117 }
25118 v.reset(OpAMD64CMOVQHI)
25119 v.AddArg3(y, x, cond)
25120 return true
25121 }
25122
25123
25124
25125 for {
25126 t := v.Type
25127 x := v_0
25128 y := v_1
25129 if v_2.Op != OpAMD64SETB {
25130 break
25131 }
25132 cond := v_2.Args[0]
25133 if !(is64BitInt(t) || isPtr(t)) {
25134 break
25135 }
25136 v.reset(OpAMD64CMOVQCS)
25137 v.AddArg3(y, x, cond)
25138 return true
25139 }
25140
25141
25142
25143 for {
25144 t := v.Type
25145 x := v_0
25146 y := v_1
25147 if v_2.Op != OpAMD64SETAE {
25148 break
25149 }
25150 cond := v_2.Args[0]
25151 if !(is64BitInt(t) || isPtr(t)) {
25152 break
25153 }
25154 v.reset(OpAMD64CMOVQCC)
25155 v.AddArg3(y, x, cond)
25156 return true
25157 }
25158
25159
25160
25161 for {
25162 t := v.Type
25163 x := v_0
25164 y := v_1
25165 if v_2.Op != OpAMD64SETBE {
25166 break
25167 }
25168 cond := v_2.Args[0]
25169 if !(is64BitInt(t) || isPtr(t)) {
25170 break
25171 }
25172 v.reset(OpAMD64CMOVQLS)
25173 v.AddArg3(y, x, cond)
25174 return true
25175 }
25176
25177
25178
25179 for {
25180 t := v.Type
25181 x := v_0
25182 y := v_1
25183 if v_2.Op != OpAMD64SETEQF {
25184 break
25185 }
25186 cond := v_2.Args[0]
25187 if !(is64BitInt(t) || isPtr(t)) {
25188 break
25189 }
25190 v.reset(OpAMD64CMOVQEQF)
25191 v.AddArg3(y, x, cond)
25192 return true
25193 }
25194
25195
25196
25197 for {
25198 t := v.Type
25199 x := v_0
25200 y := v_1
25201 if v_2.Op != OpAMD64SETNEF {
25202 break
25203 }
25204 cond := v_2.Args[0]
25205 if !(is64BitInt(t) || isPtr(t)) {
25206 break
25207 }
25208 v.reset(OpAMD64CMOVQNEF)
25209 v.AddArg3(y, x, cond)
25210 return true
25211 }
25212
25213
25214
25215 for {
25216 t := v.Type
25217 x := v_0
25218 y := v_1
25219 if v_2.Op != OpAMD64SETGF {
25220 break
25221 }
25222 cond := v_2.Args[0]
25223 if !(is64BitInt(t) || isPtr(t)) {
25224 break
25225 }
25226 v.reset(OpAMD64CMOVQGTF)
25227 v.AddArg3(y, x, cond)
25228 return true
25229 }
25230
25231
25232
25233 for {
25234 t := v.Type
25235 x := v_0
25236 y := v_1
25237 if v_2.Op != OpAMD64SETGEF {
25238 break
25239 }
25240 cond := v_2.Args[0]
25241 if !(is64BitInt(t) || isPtr(t)) {
25242 break
25243 }
25244 v.reset(OpAMD64CMOVQGEF)
25245 v.AddArg3(y, x, cond)
25246 return true
25247 }
25248
25249
25250
25251 for {
25252 t := v.Type
25253 x := v_0
25254 y := v_1
25255 if v_2.Op != OpAMD64SETEQ {
25256 break
25257 }
25258 cond := v_2.Args[0]
25259 if !(is32BitInt(t)) {
25260 break
25261 }
25262 v.reset(OpAMD64CMOVLEQ)
25263 v.AddArg3(y, x, cond)
25264 return true
25265 }
25266
25267
25268
25269 for {
25270 t := v.Type
25271 x := v_0
25272 y := v_1
25273 if v_2.Op != OpAMD64SETNE {
25274 break
25275 }
25276 cond := v_2.Args[0]
25277 if !(is32BitInt(t)) {
25278 break
25279 }
25280 v.reset(OpAMD64CMOVLNE)
25281 v.AddArg3(y, x, cond)
25282 return true
25283 }
25284
25285
25286
25287 for {
25288 t := v.Type
25289 x := v_0
25290 y := v_1
25291 if v_2.Op != OpAMD64SETL {
25292 break
25293 }
25294 cond := v_2.Args[0]
25295 if !(is32BitInt(t)) {
25296 break
25297 }
25298 v.reset(OpAMD64CMOVLLT)
25299 v.AddArg3(y, x, cond)
25300 return true
25301 }
25302
25303
25304
25305 for {
25306 t := v.Type
25307 x := v_0
25308 y := v_1
25309 if v_2.Op != OpAMD64SETG {
25310 break
25311 }
25312 cond := v_2.Args[0]
25313 if !(is32BitInt(t)) {
25314 break
25315 }
25316 v.reset(OpAMD64CMOVLGT)
25317 v.AddArg3(y, x, cond)
25318 return true
25319 }
25320
25321
25322
25323 for {
25324 t := v.Type
25325 x := v_0
25326 y := v_1
25327 if v_2.Op != OpAMD64SETLE {
25328 break
25329 }
25330 cond := v_2.Args[0]
25331 if !(is32BitInt(t)) {
25332 break
25333 }
25334 v.reset(OpAMD64CMOVLLE)
25335 v.AddArg3(y, x, cond)
25336 return true
25337 }
25338
25339
25340
25341 for {
25342 t := v.Type
25343 x := v_0
25344 y := v_1
25345 if v_2.Op != OpAMD64SETGE {
25346 break
25347 }
25348 cond := v_2.Args[0]
25349 if !(is32BitInt(t)) {
25350 break
25351 }
25352 v.reset(OpAMD64CMOVLGE)
25353 v.AddArg3(y, x, cond)
25354 return true
25355 }
25356
25357
25358
25359 for {
25360 t := v.Type
25361 x := v_0
25362 y := v_1
25363 if v_2.Op != OpAMD64SETA {
25364 break
25365 }
25366 cond := v_2.Args[0]
25367 if !(is32BitInt(t)) {
25368 break
25369 }
25370 v.reset(OpAMD64CMOVLHI)
25371 v.AddArg3(y, x, cond)
25372 return true
25373 }
25374
25375
25376
25377 for {
25378 t := v.Type
25379 x := v_0
25380 y := v_1
25381 if v_2.Op != OpAMD64SETB {
25382 break
25383 }
25384 cond := v_2.Args[0]
25385 if !(is32BitInt(t)) {
25386 break
25387 }
25388 v.reset(OpAMD64CMOVLCS)
25389 v.AddArg3(y, x, cond)
25390 return true
25391 }
25392
25393
25394
25395 for {
25396 t := v.Type
25397 x := v_0
25398 y := v_1
25399 if v_2.Op != OpAMD64SETAE {
25400 break
25401 }
25402 cond := v_2.Args[0]
25403 if !(is32BitInt(t)) {
25404 break
25405 }
25406 v.reset(OpAMD64CMOVLCC)
25407 v.AddArg3(y, x, cond)
25408 return true
25409 }
25410
25411
25412
25413 for {
25414 t := v.Type
25415 x := v_0
25416 y := v_1
25417 if v_2.Op != OpAMD64SETBE {
25418 break
25419 }
25420 cond := v_2.Args[0]
25421 if !(is32BitInt(t)) {
25422 break
25423 }
25424 v.reset(OpAMD64CMOVLLS)
25425 v.AddArg3(y, x, cond)
25426 return true
25427 }
25428
25429
25430
25431 for {
25432 t := v.Type
25433 x := v_0
25434 y := v_1
25435 if v_2.Op != OpAMD64SETEQF {
25436 break
25437 }
25438 cond := v_2.Args[0]
25439 if !(is32BitInt(t)) {
25440 break
25441 }
25442 v.reset(OpAMD64CMOVLEQF)
25443 v.AddArg3(y, x, cond)
25444 return true
25445 }
25446
25447
25448
25449 for {
25450 t := v.Type
25451 x := v_0
25452 y := v_1
25453 if v_2.Op != OpAMD64SETNEF {
25454 break
25455 }
25456 cond := v_2.Args[0]
25457 if !(is32BitInt(t)) {
25458 break
25459 }
25460 v.reset(OpAMD64CMOVLNEF)
25461 v.AddArg3(y, x, cond)
25462 return true
25463 }
25464
25465
25466
25467 for {
25468 t := v.Type
25469 x := v_0
25470 y := v_1
25471 if v_2.Op != OpAMD64SETGF {
25472 break
25473 }
25474 cond := v_2.Args[0]
25475 if !(is32BitInt(t)) {
25476 break
25477 }
25478 v.reset(OpAMD64CMOVLGTF)
25479 v.AddArg3(y, x, cond)
25480 return true
25481 }
25482
25483
25484
25485 for {
25486 t := v.Type
25487 x := v_0
25488 y := v_1
25489 if v_2.Op != OpAMD64SETGEF {
25490 break
25491 }
25492 cond := v_2.Args[0]
25493 if !(is32BitInt(t)) {
25494 break
25495 }
25496 v.reset(OpAMD64CMOVLGEF)
25497 v.AddArg3(y, x, cond)
25498 return true
25499 }
25500
25501
25502
25503 for {
25504 t := v.Type
25505 x := v_0
25506 y := v_1
25507 if v_2.Op != OpAMD64SETEQ {
25508 break
25509 }
25510 cond := v_2.Args[0]
25511 if !(is16BitInt(t)) {
25512 break
25513 }
25514 v.reset(OpAMD64CMOVWEQ)
25515 v.AddArg3(y, x, cond)
25516 return true
25517 }
25518
25519
25520
25521 for {
25522 t := v.Type
25523 x := v_0
25524 y := v_1
25525 if v_2.Op != OpAMD64SETNE {
25526 break
25527 }
25528 cond := v_2.Args[0]
25529 if !(is16BitInt(t)) {
25530 break
25531 }
25532 v.reset(OpAMD64CMOVWNE)
25533 v.AddArg3(y, x, cond)
25534 return true
25535 }
25536
25537
25538
25539 for {
25540 t := v.Type
25541 x := v_0
25542 y := v_1
25543 if v_2.Op != OpAMD64SETL {
25544 break
25545 }
25546 cond := v_2.Args[0]
25547 if !(is16BitInt(t)) {
25548 break
25549 }
25550 v.reset(OpAMD64CMOVWLT)
25551 v.AddArg3(y, x, cond)
25552 return true
25553 }
25554
25555
25556
25557 for {
25558 t := v.Type
25559 x := v_0
25560 y := v_1
25561 if v_2.Op != OpAMD64SETG {
25562 break
25563 }
25564 cond := v_2.Args[0]
25565 if !(is16BitInt(t)) {
25566 break
25567 }
25568 v.reset(OpAMD64CMOVWGT)
25569 v.AddArg3(y, x, cond)
25570 return true
25571 }
25572
25573
25574
25575 for {
25576 t := v.Type
25577 x := v_0
25578 y := v_1
25579 if v_2.Op != OpAMD64SETLE {
25580 break
25581 }
25582 cond := v_2.Args[0]
25583 if !(is16BitInt(t)) {
25584 break
25585 }
25586 v.reset(OpAMD64CMOVWLE)
25587 v.AddArg3(y, x, cond)
25588 return true
25589 }
25590
25591
25592
25593 for {
25594 t := v.Type
25595 x := v_0
25596 y := v_1
25597 if v_2.Op != OpAMD64SETGE {
25598 break
25599 }
25600 cond := v_2.Args[0]
25601 if !(is16BitInt(t)) {
25602 break
25603 }
25604 v.reset(OpAMD64CMOVWGE)
25605 v.AddArg3(y, x, cond)
25606 return true
25607 }
25608
25609
25610
25611 for {
25612 t := v.Type
25613 x := v_0
25614 y := v_1
25615 if v_2.Op != OpAMD64SETA {
25616 break
25617 }
25618 cond := v_2.Args[0]
25619 if !(is16BitInt(t)) {
25620 break
25621 }
25622 v.reset(OpAMD64CMOVWHI)
25623 v.AddArg3(y, x, cond)
25624 return true
25625 }
25626
25627
25628
25629 for {
25630 t := v.Type
25631 x := v_0
25632 y := v_1
25633 if v_2.Op != OpAMD64SETB {
25634 break
25635 }
25636 cond := v_2.Args[0]
25637 if !(is16BitInt(t)) {
25638 break
25639 }
25640 v.reset(OpAMD64CMOVWCS)
25641 v.AddArg3(y, x, cond)
25642 return true
25643 }
25644
25645
25646
25647 for {
25648 t := v.Type
25649 x := v_0
25650 y := v_1
25651 if v_2.Op != OpAMD64SETAE {
25652 break
25653 }
25654 cond := v_2.Args[0]
25655 if !(is16BitInt(t)) {
25656 break
25657 }
25658 v.reset(OpAMD64CMOVWCC)
25659 v.AddArg3(y, x, cond)
25660 return true
25661 }
25662
25663
25664
25665 for {
25666 t := v.Type
25667 x := v_0
25668 y := v_1
25669 if v_2.Op != OpAMD64SETBE {
25670 break
25671 }
25672 cond := v_2.Args[0]
25673 if !(is16BitInt(t)) {
25674 break
25675 }
25676 v.reset(OpAMD64CMOVWLS)
25677 v.AddArg3(y, x, cond)
25678 return true
25679 }
25680
25681
25682
25683 for {
25684 t := v.Type
25685 x := v_0
25686 y := v_1
25687 if v_2.Op != OpAMD64SETEQF {
25688 break
25689 }
25690 cond := v_2.Args[0]
25691 if !(is16BitInt(t)) {
25692 break
25693 }
25694 v.reset(OpAMD64CMOVWEQF)
25695 v.AddArg3(y, x, cond)
25696 return true
25697 }
25698
25699
25700
25701 for {
25702 t := v.Type
25703 x := v_0
25704 y := v_1
25705 if v_2.Op != OpAMD64SETNEF {
25706 break
25707 }
25708 cond := v_2.Args[0]
25709 if !(is16BitInt(t)) {
25710 break
25711 }
25712 v.reset(OpAMD64CMOVWNEF)
25713 v.AddArg3(y, x, cond)
25714 return true
25715 }
25716
25717
25718
25719 for {
25720 t := v.Type
25721 x := v_0
25722 y := v_1
25723 if v_2.Op != OpAMD64SETGF {
25724 break
25725 }
25726 cond := v_2.Args[0]
25727 if !(is16BitInt(t)) {
25728 break
25729 }
25730 v.reset(OpAMD64CMOVWGTF)
25731 v.AddArg3(y, x, cond)
25732 return true
25733 }
25734
25735
25736
25737 for {
25738 t := v.Type
25739 x := v_0
25740 y := v_1
25741 if v_2.Op != OpAMD64SETGEF {
25742 break
25743 }
25744 cond := v_2.Args[0]
25745 if !(is16BitInt(t)) {
25746 break
25747 }
25748 v.reset(OpAMD64CMOVWGEF)
25749 v.AddArg3(y, x, cond)
25750 return true
25751 }
25752
25753
25754
25755 for {
25756 t := v.Type
25757 x := v_0
25758 y := v_1
25759 check := v_2
25760 if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
25761 break
25762 }
25763 v.reset(OpCondSelect)
25764 v.Type = t
25765 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
25766 v0.AddArg(check)
25767 v.AddArg3(x, y, v0)
25768 return true
25769 }
25770
25771
25772
25773 for {
25774 t := v.Type
25775 x := v_0
25776 y := v_1
25777 check := v_2
25778 if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
25779 break
25780 }
25781 v.reset(OpCondSelect)
25782 v.Type = t
25783 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
25784 v0.AddArg(check)
25785 v.AddArg3(x, y, v0)
25786 return true
25787 }
25788
25789
25790
25791 for {
25792 t := v.Type
25793 x := v_0
25794 y := v_1
25795 check := v_2
25796 if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
25797 break
25798 }
25799 v.reset(OpCondSelect)
25800 v.Type = t
25801 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
25802 v0.AddArg(check)
25803 v.AddArg3(x, y, v0)
25804 return true
25805 }
25806
25807
25808
25809 for {
25810 t := v.Type
25811 x := v_0
25812 y := v_1
25813 check := v_2
25814 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
25815 break
25816 }
25817 v.reset(OpAMD64CMOVQNE)
25818 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25819 v0.AuxInt = int32ToAuxInt(0)
25820 v0.AddArg(check)
25821 v.AddArg3(y, x, v0)
25822 return true
25823 }
25824
25825
25826
25827 for {
25828 t := v.Type
25829 x := v_0
25830 y := v_1
25831 check := v_2
25832 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
25833 break
25834 }
25835 v.reset(OpAMD64CMOVLNE)
25836 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25837 v0.AuxInt = int32ToAuxInt(0)
25838 v0.AddArg(check)
25839 v.AddArg3(y, x, v0)
25840 return true
25841 }
25842
25843
25844
25845 for {
25846 t := v.Type
25847 x := v_0
25848 y := v_1
25849 check := v_2
25850 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
25851 break
25852 }
25853 v.reset(OpAMD64CMOVWNE)
25854 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
25855 v0.AuxInt = int32ToAuxInt(0)
25856 v0.AddArg(check)
25857 v.AddArg3(y, x, v0)
25858 return true
25859 }
25860 return false
25861 }
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt16(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [c])
	// result: (MOVLconst [b2i32(c)])
	for {
		c := auxIntToBool(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(b2i32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	// match: (ConstNil)
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = int8ToAuxInt(32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v3.AddArg(v1)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ x))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (ORLconst <typ.UInt32> [1<<8] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpDiv16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [a] x y)
	// result: (Select0 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (Select0 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32 [a] x y)
	// result: (Select0 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32u x y)
	// result: (Select0 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64 [a] x y)
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64u x y)
	// result: (Select0 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq16 x y)
	// result: (SETEQ (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32 x y)
	// result: (SETEQ (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32F x y)
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64 x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64F x y)
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq8 x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqB x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqPtr x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpFMA(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (FMA x y z)
	// result: (VFMADD231SD z x y)
	for {
		x := v_0
		y := v_1
		z := v_2
		v.reset(OpAMD64VFMADD231SD)
		v.AddArg3(z, x, y)
		return true
	}
}
func rewriteValueAMD64_OpFloor(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Floor x)
	// result: (ROUNDSD [1] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpGetG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (GetG mem)
	// cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
	// result: (LoweredGetG mem)
	for {
		mem := v_0
		if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
			break
		}
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (HasCPUFeature {s})
	// result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
	for {
		s := auxToSym(v.Aux)
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
		v1.Aux = symToAux(s)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (SETB (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsNonNil p)
	// result: (SETNE (TESTQ p p))
	for {
		p := v_0
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg2(p, p)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16 x y)
	// result: (SETLE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16U x y)
	// result: (SETBE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32 x y)
	// result: (SETLE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32F x y)
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32U x y)
	// result: (SETBE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64 x y)
	// result: (SETLE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64F x y)
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64U x y)
	// result: (SETBE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8 x y)
	// result: (SETLE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8U x y)
	// result: (SETBE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16 x y)
	// result: (SETL (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16U x y)
	// result: (SETB (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32 x y)
	// result: (SETL (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32F x y)
	// result: (SETGF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32U x y)
	// result: (SETB (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64 x y)
	// result: (SETL (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64F x y)
	// result: (SETGF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64U x y)
	// result: (SETB (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8 x y)
	// result: (SETL (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8U x y)
	// result: (SETB (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Load <t> ptr mem)
	// cond: is64BitInt(t) || isPtr(t)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitInt(t)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: t.IsBoolean() || is8BitInt(t)
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (LocalAddr <t> {sym} base mem)
	// cond: t.Elem().HasPointers()
	// result: (LEAQ {sym} (SPanchored base mem))
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		mem := v_1
		if !(t.Elem().HasPointers()) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
		v0.AddArg2(base, mem)
		v.AddArg(v0)
		return true
	}
	// match: (LocalAddr <t> {sym} base _)
	// cond: !t.Elem().HasPointers()
	// result: (LEAQ {sym} base)
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		if !(!t.Elem().HasPointers()) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
	return false
}
26956 func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
26957 v_1 := v.Args[1]
26958 v_0 := v.Args[0]
26959 b := v.Block
26960
26961
26962
26963 for {
26964 t := v.Type
26965 x := v_0
26966 y := v_1
26967 if !(!shiftIsBounded(v)) {
26968 break
26969 }
26970 v.reset(OpAMD64ANDL)
26971 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
26972 v0.AddArg2(x, y)
26973 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
26974 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
26975 v2.AuxInt = int16ToAuxInt(32)
26976 v2.AddArg(y)
26977 v1.AddArg(v2)
26978 v.AddArg2(v0, v1)
26979 return true
26980 }
26981
26982
26983
26984 for {
26985 x := v_0
26986 y := v_1
26987 if !(shiftIsBounded(v)) {
26988 break
26989 }
26990 v.reset(OpAMD64SHLL)
26991 v.AddArg2(x, y)
26992 return true
26993 }
26994 return false
26995 }
26996 func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
26997 v_1 := v.Args[1]
26998 v_0 := v.Args[0]
26999 b := v.Block
27000
27001
27002
27003 for {
27004 t := v.Type
27005 x := v_0
27006 y := v_1
27007 if !(!shiftIsBounded(v)) {
27008 break
27009 }
27010 v.reset(OpAMD64ANDL)
27011 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27012 v0.AddArg2(x, y)
27013 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27014 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
27015 v2.AuxInt = int32ToAuxInt(32)
27016 v2.AddArg(y)
27017 v1.AddArg(v2)
27018 v.AddArg2(v0, v1)
27019 return true
27020 }
27021
27022
27023
27024 for {
27025 x := v_0
27026 y := v_1
27027 if !(shiftIsBounded(v)) {
27028 break
27029 }
27030 v.reset(OpAMD64SHLL)
27031 v.AddArg2(x, y)
27032 return true
27033 }
27034 return false
27035 }
27036 func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
27037 v_1 := v.Args[1]
27038 v_0 := v.Args[0]
27039 b := v.Block
27040
27041
27042
27043 for {
27044 t := v.Type
27045 x := v_0
27046 y := v_1
27047 if !(!shiftIsBounded(v)) {
27048 break
27049 }
27050 v.reset(OpAMD64ANDL)
27051 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27052 v0.AddArg2(x, y)
27053 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27054 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
27055 v2.AuxInt = int32ToAuxInt(32)
27056 v2.AddArg(y)
27057 v1.AddArg(v2)
27058 v.AddArg2(v0, v1)
27059 return true
27060 }
27061
27062
27063
27064 for {
27065 x := v_0
27066 y := v_1
27067 if !(shiftIsBounded(v)) {
27068 break
27069 }
27070 v.reset(OpAMD64SHLL)
27071 v.AddArg2(x, y)
27072 return true
27073 }
27074 return false
27075 }
27076 func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
27077 v_1 := v.Args[1]
27078 v_0 := v.Args[0]
27079 b := v.Block
27080
27081
27082
27083 for {
27084 t := v.Type
27085 x := v_0
27086 y := v_1
27087 if !(!shiftIsBounded(v)) {
27088 break
27089 }
27090 v.reset(OpAMD64ANDL)
27091 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27092 v0.AddArg2(x, y)
27093 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27094 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
27095 v2.AuxInt = int8ToAuxInt(32)
27096 v2.AddArg(y)
27097 v1.AddArg(v2)
27098 v.AddArg2(v0, v1)
27099 return true
27100 }
27101
27102
27103
27104 for {
27105 x := v_0
27106 y := v_1
27107 if !(shiftIsBounded(v)) {
27108 break
27109 }
27110 v.reset(OpAMD64SHLL)
27111 v.AddArg2(x, y)
27112 return true
27113 }
27114 return false
27115 }
27116 func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
27117 v_1 := v.Args[1]
27118 v_0 := v.Args[0]
27119 b := v.Block
27120
27121
27122
27123 for {
27124 t := v.Type
27125 x := v_0
27126 y := v_1
27127 if !(!shiftIsBounded(v)) {
27128 break
27129 }
27130 v.reset(OpAMD64ANDL)
27131 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27132 v0.AddArg2(x, y)
27133 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27134 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
27135 v2.AuxInt = int16ToAuxInt(32)
27136 v2.AddArg(y)
27137 v1.AddArg(v2)
27138 v.AddArg2(v0, v1)
27139 return true
27140 }
27141
27142
27143
27144 for {
27145 x := v_0
27146 y := v_1
27147 if !(shiftIsBounded(v)) {
27148 break
27149 }
27150 v.reset(OpAMD64SHLL)
27151 v.AddArg2(x, y)
27152 return true
27153 }
27154 return false
27155 }
27156 func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
27157 v_1 := v.Args[1]
27158 v_0 := v.Args[0]
27159 b := v.Block
27160
27161
27162
27163 for {
27164 t := v.Type
27165 x := v_0
27166 y := v_1
27167 if !(!shiftIsBounded(v)) {
27168 break
27169 }
27170 v.reset(OpAMD64ANDL)
27171 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27172 v0.AddArg2(x, y)
27173 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27174 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
27175 v2.AuxInt = int32ToAuxInt(32)
27176 v2.AddArg(y)
27177 v1.AddArg(v2)
27178 v.AddArg2(v0, v1)
27179 return true
27180 }
27181
27182
27183
27184 for {
27185 x := v_0
27186 y := v_1
27187 if !(shiftIsBounded(v)) {
27188 break
27189 }
27190 v.reset(OpAMD64SHLL)
27191 v.AddArg2(x, y)
27192 return true
27193 }
27194 return false
27195 }
27196 func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
27197 v_1 := v.Args[1]
27198 v_0 := v.Args[0]
27199 b := v.Block
27200
27201
27202
27203 for {
27204 t := v.Type
27205 x := v_0
27206 y := v_1
27207 if !(!shiftIsBounded(v)) {
27208 break
27209 }
27210 v.reset(OpAMD64ANDL)
27211 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27212 v0.AddArg2(x, y)
27213 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27214 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
27215 v2.AuxInt = int32ToAuxInt(32)
27216 v2.AddArg(y)
27217 v1.AddArg(v2)
27218 v.AddArg2(v0, v1)
27219 return true
27220 }
27221
27222
27223
27224 for {
27225 x := v_0
27226 y := v_1
27227 if !(shiftIsBounded(v)) {
27228 break
27229 }
27230 v.reset(OpAMD64SHLL)
27231 v.AddArg2(x, y)
27232 return true
27233 }
27234 return false
27235 }
27236 func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
27237 v_1 := v.Args[1]
27238 v_0 := v.Args[0]
27239 b := v.Block
27240
27241
27242
27243 for {
27244 t := v.Type
27245 x := v_0
27246 y := v_1
27247 if !(!shiftIsBounded(v)) {
27248 break
27249 }
27250 v.reset(OpAMD64ANDL)
27251 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
27252 v0.AddArg2(x, y)
27253 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27254 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
27255 v2.AuxInt = int8ToAuxInt(32)
27256 v2.AddArg(y)
27257 v1.AddArg(v2)
27258 v.AddArg2(v0, v1)
27259 return true
27260 }
27261
27262
27263
27264 for {
27265 x := v_0
27266 y := v_1
27267 if !(shiftIsBounded(v)) {
27268 break
27269 }
27270 v.reset(OpAMD64SHLL)
27271 v.AddArg2(x, y)
27272 return true
27273 }
27274 return false
27275 }
27276 func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
27277 v_1 := v.Args[1]
27278 v_0 := v.Args[0]
27279 b := v.Block
27280
27281
27282
27283 for {
27284 t := v.Type
27285 x := v_0
27286 y := v_1
27287 if !(!shiftIsBounded(v)) {
27288 break
27289 }
27290 v.reset(OpAMD64ANDQ)
27291 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
27292 v0.AddArg2(x, y)
27293 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
27294 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
27295 v2.AuxInt = int16ToAuxInt(64)
27296 v2.AddArg(y)
27297 v1.AddArg(v2)
27298 v.AddArg2(v0, v1)
27299 return true
27300 }
27301
27302
27303
27304 for {
27305 x := v_0
27306 y := v_1
27307 if !(shiftIsBounded(v)) {
27308 break
27309 }
27310 v.reset(OpAMD64SHLQ)
27311 v.AddArg2(x, y)
27312 return true
27313 }
27314 return false
27315 }
// rewriteValueAMD64_OpLsh64x32 lowers a 64-bit left shift by a 32-bit count.
// An unbounded shift is masked with SBBQcarrymask so counts >= 64 yield 0.
func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpLsh64x64 lowers a 64-bit left shift by a 64-bit count.
func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpLsh64x8 lowers a 64-bit left shift by an 8-bit count.
func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpLsh8x16 lowers an 8-bit left shift by a 16-bit count.
// The result is 32-bit (SHLL); counts >= 32 are masked to 0 via SBBLcarrymask.
func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpLsh8x32 lowers an 8-bit left shift by a 32-bit count.
func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpLsh8x64 lowers an 8-bit left shift by a 64-bit count.
func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpLsh8x8 lowers an 8-bit left shift by an 8-bit count.
func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpMax32F lowers Max32F via Min32F using the identity
// max(x, y) == -min(-x, -y), which preserves the required NaN/signed-zero
// semantics of the generic Min lowering.
func rewriteValueAMD64_OpMax32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Max32F <t> x y)
	// result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpNeg32F)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpMin32F, t)
		v1 := b.NewValue0(v.Pos, OpNeg32F, t)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpNeg32F, t)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMax64F lowers Max64F via Min64F: max(x, y) == -min(-x, -y).
func rewriteValueAMD64_OpMax64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Max64F <t> x y)
	// result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpNeg64F)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpMin64F, t)
		v1 := b.NewValue0(v.Pos, OpNeg64F, t)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpNeg64F, t)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMin32F lowers Min32F to MINSS. MINSS alone returns its
// second operand when either operand is NaN, so the result is OR-combined with
// a second MINSS to propagate NaN and handle -0/+0 correctly.
func rewriteValueAMD64_OpMin32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Min32F <t> x y)
	// result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpAMD64POR)
		v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
		v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
		v1.AddArg2(x, y)
		v0.AddArg2(v1, x)
		v.AddArg2(v0, v1)
		return true
	}
}

// rewriteValueAMD64_OpMin64F lowers Min64F to MINSD, with the same
// NaN/signed-zero fix-up as Min32F.
func rewriteValueAMD64_OpMin64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Min64F <t> x y)
	// result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpAMD64POR)
		v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
		v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
		v1.AddArg2(x, y)
		v0.AddArg2(v1, x)
		v.AddArg2(v0, v1)
		return true
	}
}
// rewriteValueAMD64_OpMod16 lowers signed 16-bit modulus: the DIVW op produces
// a (quotient, remainder) tuple; Select1 picks the remainder. The bool AuxInt
// records whether the divisor is known non-(-1) (avoids the overflow check).
func rewriteValueAMD64_OpMod16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16 [a] x y)
	// result: (Select1 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod16u lowers unsigned 16-bit modulus.
func rewriteValueAMD64_OpMod16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16u x y)
	// result: (Select1 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod32 lowers signed 32-bit modulus.
func rewriteValueAMD64_OpMod32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32 [a] x y)
	// result: (Select1 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod32u lowers unsigned 32-bit modulus.
func rewriteValueAMD64_OpMod32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32u x y)
	// result: (Select1 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod64 lowers signed 64-bit modulus.
func rewriteValueAMD64_OpMod64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64 [a] x y)
	// result: (Select1 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod64u lowers unsigned 64-bit modulus.
func rewriteValueAMD64_OpMod64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64u x y)
	// result: (Select1 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod8 lowers signed 8-bit modulus by widening both
// operands to 16 bits and using the 16-bit divide (no 8-bit DIV op is used).
func rewriteValueAMD64_OpMod8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8 x y)
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpMod8u lowers unsigned 8-bit modulus by zero-extending
// both operands to 16 bits.
func rewriteValueAMD64_OpMod8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8u x y)
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMove lowers a generic memory copy (Move [size] dst src mem)
// to AMD64 loads/stores. Strategy by size: exact small sizes use single
// load/store pairs; awkward sizes use two possibly-overlapping copies; sizes
// 17..64 recurse via smaller Moves; larger copies use Duff's device or REP MOVSQ.
// Rules are tried in order; each loop body is one rule and breaks on a
// failed match.
func rewriteValueAMD64_OpMove(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Move [0] _ _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_2
		v.copyOf(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [32] dst src mem)
	// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [48] dst src mem)
	// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [64] dst src mem)
	// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(32)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(32)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(32)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [3] dst src mem)
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(2)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(2)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	// Note: the two 4-byte copies overlap by one byte; that is intentional.
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(3)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(3)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [9] dst src mem)
	// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [11] dst src mem)
	// result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 11 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(7)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(7)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 13 && s <= 15
	// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
	// Two overlapping 8-byte copies cover any size in 13..15.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s >= 13 && s <= 15) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(int32(s - 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(int32(s - 8))
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	// Peel off the ragged head with one 8-byte copy, then recurse on the
	// 16-byte-aligned remainder.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	// Same peeling, but the head needs a 16-byte (MOVO) copy.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)
	// result: (DUFFCOPY [s] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s)
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v.AddArg4(dst, src, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpNeg32F lowers float32 negation by XORing the sign bit
// (PXOR with -0.0), which also flips the sign of NaN and ±0 correctly.
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg32F x)
	// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
		v.AddArg2(x, v0)
		return true
	}
}

// rewriteValueAMD64_OpNeg64F lowers float64 negation by XORing the sign bit.
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neg64F x)
	// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
		v.AddArg2(x, v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq16 lowers a 16-bit inequality comparison.
func rewriteValueAMD64_OpNeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq16 x y)
	// result: (SETNE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeq32 lowers a 32-bit inequality comparison.
func rewriteValueAMD64_OpNeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32 x y)
	// result: (SETNE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeq32F lowers a float32 inequality comparison.
// SETNEF (rather than SETNE) handles the unordered (NaN) flag result.
func rewriteValueAMD64_OpNeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32F x y)
	// result: (SETNEF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeq64 lowers a 64-bit inequality comparison.
func rewriteValueAMD64_OpNeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64 x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeq64F lowers a float64 inequality comparison.
func rewriteValueAMD64_OpNeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64F x y)
	// result: (SETNEF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeq8 lowers an 8-bit inequality comparison.
func rewriteValueAMD64_OpNeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq8 x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeqB lowers a boolean inequality comparison (bools are
// byte-sized, so CMPB applies).
func rewriteValueAMD64_OpNeqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqB x y)
	// result: (SETNE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpNeqPtr lowers a pointer inequality comparison
// (pointers are 64-bit on AMD64).
func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqPtr x y)
	// result: (SETNE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNot lowers boolean NOT: booleans are 0/1, so flipping
// the low bit with XOR 1 inverts them.
func rewriteValueAMD64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Not x)
	// result: (XORLconst [1] x)
	for {
		x := v_0
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpOffPtr lowers pointer-offset arithmetic. Offsets that
// fit in 32 bits become an ADDQconst; otherwise the offset is materialized in
// a register first.
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [int32(off)] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(off)
		v.AddArg2(v0, ptr)
		return true
	}
}
// rewriteValueAMD64_OpPanicBounds lowers a bounds-check panic to one of three
// ABI-specific variants; boundsABI selects which registers carry the index
// and length arguments for the given panic kind.
func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpPopCount16 lowers a 16-bit population count by
// zero-extending to 32 bits and using the 32-bit POPCNT instruction.
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 x)
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}

// rewriteValueAMD64_OpPopCount8 lowers an 8-bit population count via
// zero-extension and 32-bit POPCNT.
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount8 x)
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpRoundToEven lowers RoundToEven to ROUNDSD with
// immediate 0 (SSE4.1 round-to-nearest-even mode).
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
	v_0 := v.Args[0]
	// match: (RoundToEven x)
	// result: (ROUNDSD [0] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpRsh16Ux16 lowers an unsigned 16-bit right shift by a
// 16-bit count; counts >= 16 are masked to 0 via SBBLcarrymask.
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpRsh16Ux32 lowers an unsigned 16-bit right shift by a
// 32-bit count.
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpRsh16Ux64 lowers an unsigned 16-bit right shift by a
// 64-bit count.
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}

// rewriteValueAMD64_OpRsh16Ux8 lowers an unsigned 16-bit right shift by an
// 8-bit count.
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
28722 func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
28723 v_1 := v.Args[1]
28724 v_0 := v.Args[0]
28725 b := v.Block
28726
28727
28728
28729 for {
28730 t := v.Type
28731 x := v_0
28732 y := v_1
28733 if !(!shiftIsBounded(v)) {
28734 break
28735 }
28736 v.reset(OpAMD64SARW)
28737 v.Type = t
28738 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28739 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28740 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28741 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28742 v3.AuxInt = int16ToAuxInt(16)
28743 v3.AddArg(y)
28744 v2.AddArg(v3)
28745 v1.AddArg(v2)
28746 v0.AddArg2(y, v1)
28747 v.AddArg2(x, v0)
28748 return true
28749 }
28750
28751
28752
28753 for {
28754 x := v_0
28755 y := v_1
28756 if !(shiftIsBounded(v)) {
28757 break
28758 }
28759 v.reset(OpAMD64SARW)
28760 v.AddArg2(x, y)
28761 return true
28762 }
28763 return false
28764 }
28765 func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
28766 v_1 := v.Args[1]
28767 v_0 := v.Args[0]
28768 b := v.Block
28769
28770
28771
28772 for {
28773 t := v.Type
28774 x := v_0
28775 y := v_1
28776 if !(!shiftIsBounded(v)) {
28777 break
28778 }
28779 v.reset(OpAMD64SARW)
28780 v.Type = t
28781 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28782 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28783 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28784 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28785 v3.AuxInt = int32ToAuxInt(16)
28786 v3.AddArg(y)
28787 v2.AddArg(v3)
28788 v1.AddArg(v2)
28789 v0.AddArg2(y, v1)
28790 v.AddArg2(x, v0)
28791 return true
28792 }
28793
28794
28795
28796 for {
28797 x := v_0
28798 y := v_1
28799 if !(shiftIsBounded(v)) {
28800 break
28801 }
28802 v.reset(OpAMD64SARW)
28803 v.AddArg2(x, y)
28804 return true
28805 }
28806 return false
28807 }
// rewriteValueAMD64_OpRsh16x64 lowers generic Rsh16x64 (arithmetic right
// shift of a 16-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x8 lowers generic Rsh16x8 (arithmetic right
// shift of a 16-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux16 lowers generic Rsh32Ux16 (logical right
// shift of a 32-bit value by a 16-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux32 lowers generic Rsh32Ux32 (logical right
// shift of a 32-bit value by a 32-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux64 lowers generic Rsh32Ux64 (logical right
// shift of a 32-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux8 lowers generic Rsh32Ux8 (logical right
// shift of a 32-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x16 lowers generic Rsh32x16 (arithmetic right
// shift of a 32-bit value by a 16-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x32 lowers generic Rsh32x32 (arithmetic right
// shift of a 32-bit value by a 32-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x64 lowers generic Rsh32x64 (arithmetic right
// shift of a 32-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x8 lowers generic Rsh32x8 (arithmetic right
// shift of a 32-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux16 lowers generic Rsh64Ux16 (logical right
// shift of a 64-bit value by a 16-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux32 lowers generic Rsh64Ux32 (logical right
// shift of a 64-bit value by a 32-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux64 lowers generic Rsh64Ux64 (logical right
// shift of a 64-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux8 lowers generic Rsh64Ux8 (logical right
// shift of a 64-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64x16 lowers generic Rsh64x16 (arithmetic right
// shift of a 64-bit value by a 16-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64x32 lowers generic Rsh64x32 (arithmetic right
// shift of a 64-bit value by a 32-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64x64 lowers generic Rsh64x64 (arithmetic right
// shift of a 64-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64x8 lowers generic Rsh64x8 (arithmetic right
// shift of a 64-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8Ux16 lowers generic Rsh8Ux16 (logical right
// shift of an 8-bit value by a 16-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8Ux32 lowers generic Rsh8Ux32 (logical right
// shift of an 8-bit value by a 32-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8Ux64 lowers generic Rsh8Ux64 (logical right
// shift of an 8-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8Ux8 lowers generic Rsh8Ux8 (logical right
// shift of an 8-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x16 lowers generic Rsh8x16 (arithmetic right
// shift of an 8-bit value by a 16-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x32 lowers generic Rsh8x32 (arithmetic right
// shift of an 8-bit value by a 32-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x64 lowers generic Rsh8x64 (arithmetic right
// shift of an 8-bit value by a 64-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh8x8 lowers generic Rsh8x8 (arithmetic right
// shift of an 8-bit value by an 8-bit count) to AMD64 ops.
func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSelect0 lowers Select0 (first element of a tuple-
// producing op) for multiply-with-overflow, add/sub-with-carry, and
// AddTupleFirst pseudo-ops.
func rewriteValueAMD64_OpSelect0(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select0 (Mul64uover x y))
	// result: (Select0 <typ.UInt64> (MULQU x y))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Mul32uover x y))
	// result: (Select0 <typ.UInt32> (MULLU x y))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Add64carry x y c))
	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Sub64borrow x y c))
	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSelect1 lowers Select1 (second element of a tuple-
// producing op): overflow flags of multiplies, carry/borrow-out of 64-bit
// add/sub, NEGLflags simplifications, AddTupleFirst pass-through, and
// fusing single-use lowered atomic and/or ops into locked instructions.
func rewriteValueAMD64_OpSelect1(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select1 (Mul64uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Mul32uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Add64carry x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Sub64borrow x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (NEGLflags (MOVQconst [0])))
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		x := v_0_0_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 a:(LoweredAtomicAnd64 ptr val mem))
	// cond: a.Uses == 1 && clobber(a)
	// result: (ANDQlock ptr val mem)
	for {
		a := v_0
		if a.Op != OpAMD64LoweredAtomicAnd64 {
			break
		}
		mem := a.Args[2]
		ptr := a.Args[0]
		val := a.Args[1]
		if !(a.Uses == 1 && clobber(a)) {
			break
		}
		v.reset(OpAMD64ANDQlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Select1 a:(LoweredAtomicAnd32 ptr val mem))
	// cond: a.Uses == 1 && clobber(a)
	// result: (ANDLlock ptr val mem)
	for {
		a := v_0
		if a.Op != OpAMD64LoweredAtomicAnd32 {
			break
		}
		mem := a.Args[2]
		ptr := a.Args[0]
		val := a.Args[1]
		if !(a.Uses == 1 && clobber(a)) {
			break
		}
		v.reset(OpAMD64ANDLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Select1 a:(LoweredAtomicOr64 ptr val mem))
	// cond: a.Uses == 1 && clobber(a)
	// result: (ORQlock ptr val mem)
	for {
		a := v_0
		if a.Op != OpAMD64LoweredAtomicOr64 {
			break
		}
		mem := a.Args[2]
		ptr := a.Args[0]
		val := a.Args[1]
		if !(a.Uses == 1 && clobber(a)) {
			break
		}
		v.reset(OpAMD64ORQlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Select1 a:(LoweredAtomicOr32 ptr val mem))
	// cond: a.Uses == 1 && clobber(a)
	// result: (ORLlock ptr val mem)
	for {
		a := v_0
		if a.Op != OpAMD64LoweredAtomicOr32 {
			break
		}
		mem := a.Args[2]
		ptr := a.Args[0]
		val := a.Args[1]
		if !(a.Uses == 1 && clobber(a)) {
			break
		}
		v.reset(OpAMD64ORLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSelectN replaces single-use calls to
// runtime.memmove of a small, inlinable, constant size with a Move op.
func rewriteValueAMD64_OpSelectN(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst [sc] _ s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
	// cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
	// result: (Move [sc.Val64()] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
			break
		}
		sym := auxToCall(call.Aux)
		s1 := call.Args[0]
		if s1.Op != OpAMD64MOVQstoreconst {
			break
		}
		sc := auxIntToValAndOff(s1.AuxInt)
		_ = s1.Args[1]
		s2 := s1.Args[1]
		if s2.Op != OpAMD64MOVQstore {
			break
		}
		_ = s2.Args[2]
		src := s2.Args[1]
		s3 := s2.Args[2]
		if s3.Op != OpAMD64MOVQstore {
			break
		}
		mem := s3.Args[2]
		dst := s3.Args[1]
		if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sc.Val64())
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
	// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
	// result: (Move [sz] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
			break
		}
		sym := auxToCall(call.Aux)
		mem := call.Args[3]
		dst := call.Args[0]
		src := call.Args[1]
		call_2 := call.Args[2]
		if call_2.Op != OpAMD64MOVQconst {
			break
		}
		sz := auxIntToInt64(call_2.AuxInt)
		if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sz)
		v.AddArg3(dst, src, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpSlicemask lowers Slicemask (all-ones if the input
// is nonzero, else zero) via negate-then-arithmetic-shift.
func rewriteValueAMD64_OpSlicemask(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (Slicemask <t> x)
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v_0
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(63)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpSpectreIndex lowers SpectreIndex (x clamped to 0
// when x >= y, for Spectre index masking) to a conditional move.
func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreIndex <t> x y)
	// result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQCC)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
// rewriteValueAMD64_OpSpectreSliceIndex lowers SpectreSliceIndex (x
// clamped to 0 when x > y, for Spectre slice-bound masking) to a
// conditional move on the unsigned above condition.
func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreSliceIndex <t> x y)
	// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQHI)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
30333 func rewriteValueAMD64_OpStore(v *Value) bool {
30334 v_2 := v.Args[2]
30335 v_1 := v.Args[1]
30336 v_0 := v.Args[0]
30337
30338
30339
30340 for {
30341 t := auxToType(v.Aux)
30342 ptr := v_0
30343 val := v_1
30344 mem := v_2
30345 if !(t.Size() == 8 && t.IsFloat()) {
30346 break
30347 }
30348 v.reset(OpAMD64MOVSDstore)
30349 v.AddArg3(ptr, val, mem)
30350 return true
30351 }
30352
30353
30354
30355 for {
30356 t := auxToType(v.Aux)
30357 ptr := v_0
30358 val := v_1
30359 mem := v_2
30360 if !(t.Size() == 4 && t.IsFloat()) {
30361 break
30362 }
30363 v.reset(OpAMD64MOVSSstore)
30364 v.AddArg3(ptr, val, mem)
30365 return true
30366 }
30367
30368
30369
30370 for {
30371 t := auxToType(v.Aux)
30372 ptr := v_0
30373 val := v_1
30374 mem := v_2
30375 if !(t.Size() == 8 && !t.IsFloat()) {
30376 break
30377 }
30378 v.reset(OpAMD64MOVQstore)
30379 v.AddArg3(ptr, val, mem)
30380 return true
30381 }
30382
30383
30384
30385 for {
30386 t := auxToType(v.Aux)
30387 ptr := v_0
30388 val := v_1
30389 mem := v_2
30390 if !(t.Size() == 4 && !t.IsFloat()) {
30391 break
30392 }
30393 v.reset(OpAMD64MOVLstore)
30394 v.AddArg3(ptr, val, mem)
30395 return true
30396 }
30397
30398
30399
30400 for {
30401 t := auxToType(v.Aux)
30402 ptr := v_0
30403 val := v_1
30404 mem := v_2
30405 if !(t.Size() == 2) {
30406 break
30407 }
30408 v.reset(OpAMD64MOVWstore)
30409 v.AddArg3(ptr, val, mem)
30410 return true
30411 }
30412
30413
30414
30415 for {
30416 t := auxToType(v.Aux)
30417 ptr := v_0
30418 val := v_1
30419 mem := v_2
30420 if !(t.Size() == 1) {
30421 break
30422 }
30423 v.reset(OpAMD64MOVBstore)
30424 v.AddArg3(ptr, val, mem)
30425 return true
30426 }
30427 return false
30428 }
30429 func rewriteValueAMD64_OpTrunc(v *Value) bool {
30430 v_0 := v.Args[0]
30431
30432
30433 for {
30434 x := v_0
30435 v.reset(OpAMD64ROUNDSD)
30436 v.AuxInt = int8ToAuxInt(3)
30437 v.AddArg(x)
30438 return true
30439 }
30440 }
// rewriteValueAMD64_OpZero lowers a generic (Zero [s] destptr mem) to AMD64
// stores. Rules are tried in order and the first match wins — the exact-size
// cases must precede the range-based cases that overlap them. Small/odd sizes
// become chains of constant stores (sometimes overlapping), 16..64-byte
// multiples use 16-byte MOVO stores, medium sizes use DUFFZERO, and large
// sizes fall back to REP STOSQ. Returns false if no rule applies.
func rewriteValueAMD64_OpZero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Zero [0] _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_1
		v.copyOf(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
	// 3 bytes = a 2-byte store at offset 0 plus a 1-byte store at offset 2.
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	// Two overlapping 4-byte stores (offsets 0 and 3) cover 7 bytes.
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [9] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [10] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [11] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	// Overlapping stores: 8 bytes at offset 0, 4 bytes at offset 7.
	for {
		if auxIntToInt64(v.AuxInt) != 11 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [12] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 12 && s < 16
	// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	// Two overlapping 8-byte stores cover any size 13..15. Must come after
	// the exact-size cases above.
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 12 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> [s%16] destptr) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	// Peel a leading 16-byte store so the remainder is a 16-byte-aligned
	// Zero, which the rules below (or DUFFZERO/REPSTOSQ) then handle.
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [48] destptr mem)
	// result: chain of three 16-byte MOVOstoreconst at offsets 0, 16, 32.
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [64] destptr mem)
	// result: chain of four 16-byte MOVOstoreconst at offsets 0, 16, 32, 48.
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0
	// result: (DUFFZERO [s] destptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 64 && s <= 1024 && s%16 == 0) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 1024 && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	// REP STOSQ zeroes s/8 quadwords; the second MOVQconst is the value
	// stored (0).
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 1024 && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = int64ToAuxInt(0)
		v.AddArg4(destptr, v0, v1, mem)
		return true
	}
	return false
}
30780 func rewriteBlockAMD64(b *Block) bool {
30781 typ := &b.Func.Config.Types
30782 switch b.Kind {
30783 case BlockAMD64EQ:
30784
30785
30786 for b.Controls[0].Op == OpAMD64TESTL {
30787 v_0 := b.Controls[0]
30788 _ = v_0.Args[1]
30789 v_0_0 := v_0.Args[0]
30790 v_0_1 := v_0.Args[1]
30791 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30792 if v_0_0.Op != OpAMD64SHLL {
30793 continue
30794 }
30795 x := v_0_0.Args[1]
30796 v_0_0_0 := v_0_0.Args[0]
30797 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
30798 continue
30799 }
30800 y := v_0_1
30801 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
30802 v0.AddArg2(x, y)
30803 b.resetWithControl(BlockAMD64UGE, v0)
30804 return true
30805 }
30806 break
30807 }
30808
30809
30810 for b.Controls[0].Op == OpAMD64TESTQ {
30811 v_0 := b.Controls[0]
30812 _ = v_0.Args[1]
30813 v_0_0 := v_0.Args[0]
30814 v_0_1 := v_0.Args[1]
30815 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30816 if v_0_0.Op != OpAMD64SHLQ {
30817 continue
30818 }
30819 x := v_0_0.Args[1]
30820 v_0_0_0 := v_0_0.Args[0]
30821 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
30822 continue
30823 }
30824 y := v_0_1
30825 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
30826 v0.AddArg2(x, y)
30827 b.resetWithControl(BlockAMD64UGE, v0)
30828 return true
30829 }
30830 break
30831 }
30832
30833
30834
30835 for b.Controls[0].Op == OpAMD64TESTLconst {
30836 v_0 := b.Controls[0]
30837 c := auxIntToInt32(v_0.AuxInt)
30838 x := v_0.Args[0]
30839 if !(isUint32PowerOfTwo(int64(c))) {
30840 break
30841 }
30842 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30843 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30844 v0.AddArg(x)
30845 b.resetWithControl(BlockAMD64UGE, v0)
30846 return true
30847 }
30848
30849
30850
30851 for b.Controls[0].Op == OpAMD64TESTQconst {
30852 v_0 := b.Controls[0]
30853 c := auxIntToInt32(v_0.AuxInt)
30854 x := v_0.Args[0]
30855 if !(isUint64PowerOfTwo(int64(c))) {
30856 break
30857 }
30858 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30859 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30860 v0.AddArg(x)
30861 b.resetWithControl(BlockAMD64UGE, v0)
30862 return true
30863 }
30864
30865
30866
30867 for b.Controls[0].Op == OpAMD64TESTQ {
30868 v_0 := b.Controls[0]
30869 _ = v_0.Args[1]
30870 v_0_0 := v_0.Args[0]
30871 v_0_1 := v_0.Args[1]
30872 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30873 if v_0_0.Op != OpAMD64MOVQconst {
30874 continue
30875 }
30876 c := auxIntToInt64(v_0_0.AuxInt)
30877 x := v_0_1
30878 if !(isUint64PowerOfTwo(c)) {
30879 continue
30880 }
30881 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30882 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
30883 v0.AddArg(x)
30884 b.resetWithControl(BlockAMD64UGE, v0)
30885 return true
30886 }
30887 break
30888 }
30889
30890
30891
30892 for b.Controls[0].Op == OpAMD64TESTQ {
30893 v_0 := b.Controls[0]
30894 _ = v_0.Args[1]
30895 v_0_0 := v_0.Args[0]
30896 v_0_1 := v_0.Args[1]
30897 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30898 z1 := v_0_0
30899 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
30900 continue
30901 }
30902 z1_0 := z1.Args[0]
30903 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30904 continue
30905 }
30906 x := z1_0.Args[0]
30907 z2 := v_0_1
30908 if !(z1 == z2) {
30909 continue
30910 }
30911 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30912 v0.AuxInt = int8ToAuxInt(63)
30913 v0.AddArg(x)
30914 b.resetWithControl(BlockAMD64UGE, v0)
30915 return true
30916 }
30917 break
30918 }
30919
30920
30921
30922 for b.Controls[0].Op == OpAMD64TESTL {
30923 v_0 := b.Controls[0]
30924 _ = v_0.Args[1]
30925 v_0_0 := v_0.Args[0]
30926 v_0_1 := v_0.Args[1]
30927 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30928 z1 := v_0_0
30929 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
30930 continue
30931 }
30932 z1_0 := z1.Args[0]
30933 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30934 continue
30935 }
30936 x := z1_0.Args[0]
30937 z2 := v_0_1
30938 if !(z1 == z2) {
30939 continue
30940 }
30941 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30942 v0.AuxInt = int8ToAuxInt(31)
30943 v0.AddArg(x)
30944 b.resetWithControl(BlockAMD64UGE, v0)
30945 return true
30946 }
30947 break
30948 }
30949
30950
30951
30952 for b.Controls[0].Op == OpAMD64TESTQ {
30953 v_0 := b.Controls[0]
30954 _ = v_0.Args[1]
30955 v_0_0 := v_0.Args[0]
30956 v_0_1 := v_0.Args[1]
30957 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30958 z1 := v_0_0
30959 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30960 continue
30961 }
30962 z1_0 := z1.Args[0]
30963 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30964 continue
30965 }
30966 x := z1_0.Args[0]
30967 z2 := v_0_1
30968 if !(z1 == z2) {
30969 continue
30970 }
30971 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30972 v0.AuxInt = int8ToAuxInt(0)
30973 v0.AddArg(x)
30974 b.resetWithControl(BlockAMD64UGE, v0)
30975 return true
30976 }
30977 break
30978 }
30979
30980
30981
30982 for b.Controls[0].Op == OpAMD64TESTL {
30983 v_0 := b.Controls[0]
30984 _ = v_0.Args[1]
30985 v_0_0 := v_0.Args[0]
30986 v_0_1 := v_0.Args[1]
30987 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30988 z1 := v_0_0
30989 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30990 continue
30991 }
30992 z1_0 := z1.Args[0]
30993 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30994 continue
30995 }
30996 x := z1_0.Args[0]
30997 z2 := v_0_1
30998 if !(z1 == z2) {
30999 continue
31000 }
31001 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31002 v0.AuxInt = int8ToAuxInt(0)
31003 v0.AddArg(x)
31004 b.resetWithControl(BlockAMD64UGE, v0)
31005 return true
31006 }
31007 break
31008 }
31009
31010
31011
31012 for b.Controls[0].Op == OpAMD64TESTQ {
31013 v_0 := b.Controls[0]
31014 _ = v_0.Args[1]
31015 v_0_0 := v_0.Args[0]
31016 v_0_1 := v_0.Args[1]
31017 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31018 z1 := v_0_0
31019 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31020 continue
31021 }
31022 x := z1.Args[0]
31023 z2 := v_0_1
31024 if !(z1 == z2) {
31025 continue
31026 }
31027 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31028 v0.AuxInt = int8ToAuxInt(63)
31029 v0.AddArg(x)
31030 b.resetWithControl(BlockAMD64UGE, v0)
31031 return true
31032 }
31033 break
31034 }
31035
31036
31037
31038 for b.Controls[0].Op == OpAMD64TESTL {
31039 v_0 := b.Controls[0]
31040 _ = v_0.Args[1]
31041 v_0_0 := v_0.Args[0]
31042 v_0_1 := v_0.Args[1]
31043 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31044 z1 := v_0_0
31045 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31046 continue
31047 }
31048 x := z1.Args[0]
31049 z2 := v_0_1
31050 if !(z1 == z2) {
31051 continue
31052 }
31053 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31054 v0.AuxInt = int8ToAuxInt(31)
31055 v0.AddArg(x)
31056 b.resetWithControl(BlockAMD64UGE, v0)
31057 return true
31058 }
31059 break
31060 }
31061
31062
31063 for b.Controls[0].Op == OpAMD64InvertFlags {
31064 v_0 := b.Controls[0]
31065 cmp := v_0.Args[0]
31066 b.resetWithControl(BlockAMD64EQ, cmp)
31067 return true
31068 }
31069
31070
31071 for b.Controls[0].Op == OpAMD64FlagEQ {
31072 b.Reset(BlockFirst)
31073 return true
31074 }
31075
31076
31077 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31078 b.Reset(BlockFirst)
31079 b.swapSuccessors()
31080 return true
31081 }
31082
31083
31084 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31085 b.Reset(BlockFirst)
31086 b.swapSuccessors()
31087 return true
31088 }
31089
31090
31091 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31092 b.Reset(BlockFirst)
31093 b.swapSuccessors()
31094 return true
31095 }
31096
31097
31098 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31099 b.Reset(BlockFirst)
31100 b.swapSuccessors()
31101 return true
31102 }
31103
31104
31105 for b.Controls[0].Op == OpAMD64TESTQ {
31106 v_0 := b.Controls[0]
31107 _ = v_0.Args[1]
31108 v_0_0 := v_0.Args[0]
31109 v_0_1 := v_0.Args[1]
31110 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31111 s := v_0_0
31112 if s.Op != OpSelect0 {
31113 continue
31114 }
31115 blsr := s.Args[0]
31116 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
31117 continue
31118 }
31119 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31120 v0.AddArg(blsr)
31121 b.resetWithControl(BlockAMD64EQ, v0)
31122 return true
31123 }
31124 break
31125 }
31126
31127
31128 for b.Controls[0].Op == OpAMD64TESTL {
31129 v_0 := b.Controls[0]
31130 _ = v_0.Args[1]
31131 v_0_0 := v_0.Args[0]
31132 v_0_1 := v_0.Args[1]
31133 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31134 s := v_0_0
31135 if s.Op != OpSelect0 {
31136 continue
31137 }
31138 blsr := s.Args[0]
31139 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
31140 continue
31141 }
31142 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31143 v0.AddArg(blsr)
31144 b.resetWithControl(BlockAMD64EQ, v0)
31145 return true
31146 }
31147 break
31148 }
31149 case BlockAMD64GE:
31150
31151
31152
31153 for b.Controls[0].Op == OpAMD64CMPQconst {
31154 c := b.Controls[0]
31155 if auxIntToInt32(c.AuxInt) != 128 {
31156 break
31157 }
31158 z := c.Args[0]
31159 if !(c.Uses == 1) {
31160 break
31161 }
31162 v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
31163 v0.AuxInt = int32ToAuxInt(127)
31164 v0.AddArg(z)
31165 b.resetWithControl(BlockAMD64GT, v0)
31166 return true
31167 }
31168
31169
31170
31171 for b.Controls[0].Op == OpAMD64CMPLconst {
31172 c := b.Controls[0]
31173 if auxIntToInt32(c.AuxInt) != 128 {
31174 break
31175 }
31176 z := c.Args[0]
31177 if !(c.Uses == 1) {
31178 break
31179 }
31180 v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
31181 v0.AuxInt = int32ToAuxInt(127)
31182 v0.AddArg(z)
31183 b.resetWithControl(BlockAMD64GT, v0)
31184 return true
31185 }
31186
31187
31188 for b.Controls[0].Op == OpAMD64InvertFlags {
31189 v_0 := b.Controls[0]
31190 cmp := v_0.Args[0]
31191 b.resetWithControl(BlockAMD64LE, cmp)
31192 return true
31193 }
31194
31195
31196 for b.Controls[0].Op == OpAMD64FlagEQ {
31197 b.Reset(BlockFirst)
31198 return true
31199 }
31200
31201
31202 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31203 b.Reset(BlockFirst)
31204 b.swapSuccessors()
31205 return true
31206 }
31207
31208
31209 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31210 b.Reset(BlockFirst)
31211 b.swapSuccessors()
31212 return true
31213 }
31214
31215
31216 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31217 b.Reset(BlockFirst)
31218 return true
31219 }
31220
31221
31222 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31223 b.Reset(BlockFirst)
31224 return true
31225 }
31226 case BlockAMD64GT:
31227
31228
31229 for b.Controls[0].Op == OpAMD64InvertFlags {
31230 v_0 := b.Controls[0]
31231 cmp := v_0.Args[0]
31232 b.resetWithControl(BlockAMD64LT, cmp)
31233 return true
31234 }
31235
31236
31237 for b.Controls[0].Op == OpAMD64FlagEQ {
31238 b.Reset(BlockFirst)
31239 b.swapSuccessors()
31240 return true
31241 }
31242
31243
31244 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31245 b.Reset(BlockFirst)
31246 b.swapSuccessors()
31247 return true
31248 }
31249
31250
31251 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31252 b.Reset(BlockFirst)
31253 b.swapSuccessors()
31254 return true
31255 }
31256
31257
31258 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31259 b.Reset(BlockFirst)
31260 return true
31261 }
31262
31263
31264 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31265 b.Reset(BlockFirst)
31266 return true
31267 }
31268 case BlockIf:
31269
31270
31271 for b.Controls[0].Op == OpAMD64SETL {
31272 v_0 := b.Controls[0]
31273 cmp := v_0.Args[0]
31274 b.resetWithControl(BlockAMD64LT, cmp)
31275 return true
31276 }
31277
31278
31279 for b.Controls[0].Op == OpAMD64SETLE {
31280 v_0 := b.Controls[0]
31281 cmp := v_0.Args[0]
31282 b.resetWithControl(BlockAMD64LE, cmp)
31283 return true
31284 }
31285
31286
31287 for b.Controls[0].Op == OpAMD64SETG {
31288 v_0 := b.Controls[0]
31289 cmp := v_0.Args[0]
31290 b.resetWithControl(BlockAMD64GT, cmp)
31291 return true
31292 }
31293
31294
31295 for b.Controls[0].Op == OpAMD64SETGE {
31296 v_0 := b.Controls[0]
31297 cmp := v_0.Args[0]
31298 b.resetWithControl(BlockAMD64GE, cmp)
31299 return true
31300 }
31301
31302
31303 for b.Controls[0].Op == OpAMD64SETEQ {
31304 v_0 := b.Controls[0]
31305 cmp := v_0.Args[0]
31306 b.resetWithControl(BlockAMD64EQ, cmp)
31307 return true
31308 }
31309
31310
31311 for b.Controls[0].Op == OpAMD64SETNE {
31312 v_0 := b.Controls[0]
31313 cmp := v_0.Args[0]
31314 b.resetWithControl(BlockAMD64NE, cmp)
31315 return true
31316 }
31317
31318
31319 for b.Controls[0].Op == OpAMD64SETB {
31320 v_0 := b.Controls[0]
31321 cmp := v_0.Args[0]
31322 b.resetWithControl(BlockAMD64ULT, cmp)
31323 return true
31324 }
31325
31326
31327 for b.Controls[0].Op == OpAMD64SETBE {
31328 v_0 := b.Controls[0]
31329 cmp := v_0.Args[0]
31330 b.resetWithControl(BlockAMD64ULE, cmp)
31331 return true
31332 }
31333
31334
31335 for b.Controls[0].Op == OpAMD64SETA {
31336 v_0 := b.Controls[0]
31337 cmp := v_0.Args[0]
31338 b.resetWithControl(BlockAMD64UGT, cmp)
31339 return true
31340 }
31341
31342
31343 for b.Controls[0].Op == OpAMD64SETAE {
31344 v_0 := b.Controls[0]
31345 cmp := v_0.Args[0]
31346 b.resetWithControl(BlockAMD64UGE, cmp)
31347 return true
31348 }
31349
31350
31351 for b.Controls[0].Op == OpAMD64SETO {
31352 v_0 := b.Controls[0]
31353 cmp := v_0.Args[0]
31354 b.resetWithControl(BlockAMD64OS, cmp)
31355 return true
31356 }
31357
31358
31359 for b.Controls[0].Op == OpAMD64SETGF {
31360 v_0 := b.Controls[0]
31361 cmp := v_0.Args[0]
31362 b.resetWithControl(BlockAMD64UGT, cmp)
31363 return true
31364 }
31365
31366
31367 for b.Controls[0].Op == OpAMD64SETGEF {
31368 v_0 := b.Controls[0]
31369 cmp := v_0.Args[0]
31370 b.resetWithControl(BlockAMD64UGE, cmp)
31371 return true
31372 }
31373
31374
31375 for b.Controls[0].Op == OpAMD64SETEQF {
31376 v_0 := b.Controls[0]
31377 cmp := v_0.Args[0]
31378 b.resetWithControl(BlockAMD64EQF, cmp)
31379 return true
31380 }
31381
31382
31383 for b.Controls[0].Op == OpAMD64SETNEF {
31384 v_0 := b.Controls[0]
31385 cmp := v_0.Args[0]
31386 b.resetWithControl(BlockAMD64NEF, cmp)
31387 return true
31388 }
31389
31390
31391 for {
31392 cond := b.Controls[0]
31393 v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
31394 v0.AddArg2(cond, cond)
31395 b.resetWithControl(BlockAMD64NE, v0)
31396 return true
31397 }
31398 case BlockJumpTable:
31399
31400
31401 for {
31402 idx := b.Controls[0]
31403 v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
31404 v0.Aux = symToAux(makeJumpTableSym(b))
31405 v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
31406 v0.AddArg(v1)
31407 b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
31408 b.Aux = symToAux(makeJumpTableSym(b))
31409 return true
31410 }
31411 case BlockAMD64LE:
31412
31413
31414 for b.Controls[0].Op == OpAMD64InvertFlags {
31415 v_0 := b.Controls[0]
31416 cmp := v_0.Args[0]
31417 b.resetWithControl(BlockAMD64GE, cmp)
31418 return true
31419 }
31420
31421
31422 for b.Controls[0].Op == OpAMD64FlagEQ {
31423 b.Reset(BlockFirst)
31424 return true
31425 }
31426
31427
31428 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31429 b.Reset(BlockFirst)
31430 return true
31431 }
31432
31433
31434 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31435 b.Reset(BlockFirst)
31436 return true
31437 }
31438
31439
31440 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31441 b.Reset(BlockFirst)
31442 b.swapSuccessors()
31443 return true
31444 }
31445
31446
31447 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31448 b.Reset(BlockFirst)
31449 b.swapSuccessors()
31450 return true
31451 }
31452 case BlockAMD64LT:
31453
31454
31455
31456 for b.Controls[0].Op == OpAMD64CMPQconst {
31457 c := b.Controls[0]
31458 if auxIntToInt32(c.AuxInt) != 128 {
31459 break
31460 }
31461 z := c.Args[0]
31462 if !(c.Uses == 1) {
31463 break
31464 }
31465 v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
31466 v0.AuxInt = int32ToAuxInt(127)
31467 v0.AddArg(z)
31468 b.resetWithControl(BlockAMD64LE, v0)
31469 return true
31470 }
31471
31472
31473
31474 for b.Controls[0].Op == OpAMD64CMPLconst {
31475 c := b.Controls[0]
31476 if auxIntToInt32(c.AuxInt) != 128 {
31477 break
31478 }
31479 z := c.Args[0]
31480 if !(c.Uses == 1) {
31481 break
31482 }
31483 v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
31484 v0.AuxInt = int32ToAuxInt(127)
31485 v0.AddArg(z)
31486 b.resetWithControl(BlockAMD64LE, v0)
31487 return true
31488 }
31489
31490
31491 for b.Controls[0].Op == OpAMD64InvertFlags {
31492 v_0 := b.Controls[0]
31493 cmp := v_0.Args[0]
31494 b.resetWithControl(BlockAMD64GT, cmp)
31495 return true
31496 }
31497
31498
31499 for b.Controls[0].Op == OpAMD64FlagEQ {
31500 b.Reset(BlockFirst)
31501 b.swapSuccessors()
31502 return true
31503 }
31504
31505
31506 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31507 b.Reset(BlockFirst)
31508 return true
31509 }
31510
31511
31512 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31513 b.Reset(BlockFirst)
31514 return true
31515 }
31516
31517
31518 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31519 b.Reset(BlockFirst)
31520 b.swapSuccessors()
31521 return true
31522 }
31523
31524
31525 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31526 b.Reset(BlockFirst)
31527 b.swapSuccessors()
31528 return true
31529 }
31530 case BlockAMD64NE:
31531
31532
31533 for b.Controls[0].Op == OpAMD64TESTB {
31534 v_0 := b.Controls[0]
31535 _ = v_0.Args[1]
31536 v_0_0 := v_0.Args[0]
31537 if v_0_0.Op != OpAMD64SETL {
31538 break
31539 }
31540 cmp := v_0_0.Args[0]
31541 v_0_1 := v_0.Args[1]
31542 if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
31543 break
31544 }
31545 b.resetWithControl(BlockAMD64LT, cmp)
31546 return true
31547 }
31548
31549
31550 for b.Controls[0].Op == OpAMD64TESTB {
31551 v_0 := b.Controls[0]
31552 _ = v_0.Args[1]
31553 v_0_0 := v_0.Args[0]
31554 if v_0_0.Op != OpAMD64SETLE {
31555 break
31556 }
31557 cmp := v_0_0.Args[0]
31558 v_0_1 := v_0.Args[1]
31559 if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
31560 break
31561 }
31562 b.resetWithControl(BlockAMD64LE, cmp)
31563 return true
31564 }
31565
31566
31567 for b.Controls[0].Op == OpAMD64TESTB {
31568 v_0 := b.Controls[0]
31569 _ = v_0.Args[1]
31570 v_0_0 := v_0.Args[0]
31571 if v_0_0.Op != OpAMD64SETG {
31572 break
31573 }
31574 cmp := v_0_0.Args[0]
31575 v_0_1 := v_0.Args[1]
31576 if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
31577 break
31578 }
31579 b.resetWithControl(BlockAMD64GT, cmp)
31580 return true
31581 }
31582
31583
31584 for b.Controls[0].Op == OpAMD64TESTB {
31585 v_0 := b.Controls[0]
31586 _ = v_0.Args[1]
31587 v_0_0 := v_0.Args[0]
31588 if v_0_0.Op != OpAMD64SETGE {
31589 break
31590 }
31591 cmp := v_0_0.Args[0]
31592 v_0_1 := v_0.Args[1]
31593 if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
31594 break
31595 }
31596 b.resetWithControl(BlockAMD64GE, cmp)
31597 return true
31598 }
31599
31600
31601 for b.Controls[0].Op == OpAMD64TESTB {
31602 v_0 := b.Controls[0]
31603 _ = v_0.Args[1]
31604 v_0_0 := v_0.Args[0]
31605 if v_0_0.Op != OpAMD64SETEQ {
31606 break
31607 }
31608 cmp := v_0_0.Args[0]
31609 v_0_1 := v_0.Args[1]
31610 if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
31611 break
31612 }
31613 b.resetWithControl(BlockAMD64EQ, cmp)
31614 return true
31615 }
31616
31617
31618 for b.Controls[0].Op == OpAMD64TESTB {
31619 v_0 := b.Controls[0]
31620 _ = v_0.Args[1]
31621 v_0_0 := v_0.Args[0]
31622 if v_0_0.Op != OpAMD64SETNE {
31623 break
31624 }
31625 cmp := v_0_0.Args[0]
31626 v_0_1 := v_0.Args[1]
31627 if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
31628 break
31629 }
31630 b.resetWithControl(BlockAMD64NE, cmp)
31631 return true
31632 }
31633
31634
31635 for b.Controls[0].Op == OpAMD64TESTB {
31636 v_0 := b.Controls[0]
31637 _ = v_0.Args[1]
31638 v_0_0 := v_0.Args[0]
31639 if v_0_0.Op != OpAMD64SETB {
31640 break
31641 }
31642 cmp := v_0_0.Args[0]
31643 v_0_1 := v_0.Args[1]
31644 if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
31645 break
31646 }
31647 b.resetWithControl(BlockAMD64ULT, cmp)
31648 return true
31649 }
31650
31651
31652 for b.Controls[0].Op == OpAMD64TESTB {
31653 v_0 := b.Controls[0]
31654 _ = v_0.Args[1]
31655 v_0_0 := v_0.Args[0]
31656 if v_0_0.Op != OpAMD64SETBE {
31657 break
31658 }
31659 cmp := v_0_0.Args[0]
31660 v_0_1 := v_0.Args[1]
31661 if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
31662 break
31663 }
31664 b.resetWithControl(BlockAMD64ULE, cmp)
31665 return true
31666 }
31667
31668
31669 for b.Controls[0].Op == OpAMD64TESTB {
31670 v_0 := b.Controls[0]
31671 _ = v_0.Args[1]
31672 v_0_0 := v_0.Args[0]
31673 if v_0_0.Op != OpAMD64SETA {
31674 break
31675 }
31676 cmp := v_0_0.Args[0]
31677 v_0_1 := v_0.Args[1]
31678 if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
31679 break
31680 }
31681 b.resetWithControl(BlockAMD64UGT, cmp)
31682 return true
31683 }
31684
31685
31686 for b.Controls[0].Op == OpAMD64TESTB {
31687 v_0 := b.Controls[0]
31688 _ = v_0.Args[1]
31689 v_0_0 := v_0.Args[0]
31690 if v_0_0.Op != OpAMD64SETAE {
31691 break
31692 }
31693 cmp := v_0_0.Args[0]
31694 v_0_1 := v_0.Args[1]
31695 if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
31696 break
31697 }
31698 b.resetWithControl(BlockAMD64UGE, cmp)
31699 return true
31700 }
31701
31702
31703 for b.Controls[0].Op == OpAMD64TESTB {
31704 v_0 := b.Controls[0]
31705 _ = v_0.Args[1]
31706 v_0_0 := v_0.Args[0]
31707 if v_0_0.Op != OpAMD64SETO {
31708 break
31709 }
31710 cmp := v_0_0.Args[0]
31711 v_0_1 := v_0.Args[1]
31712 if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
31713 break
31714 }
31715 b.resetWithControl(BlockAMD64OS, cmp)
31716 return true
31717 }
31718
31719
31720 for b.Controls[0].Op == OpAMD64TESTL {
31721 v_0 := b.Controls[0]
31722 _ = v_0.Args[1]
31723 v_0_0 := v_0.Args[0]
31724 v_0_1 := v_0.Args[1]
31725 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31726 if v_0_0.Op != OpAMD64SHLL {
31727 continue
31728 }
31729 x := v_0_0.Args[1]
31730 v_0_0_0 := v_0_0.Args[0]
31731 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
31732 continue
31733 }
31734 y := v_0_1
31735 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
31736 v0.AddArg2(x, y)
31737 b.resetWithControl(BlockAMD64ULT, v0)
31738 return true
31739 }
31740 break
31741 }
31742
31743
31744 for b.Controls[0].Op == OpAMD64TESTQ {
31745 v_0 := b.Controls[0]
31746 _ = v_0.Args[1]
31747 v_0_0 := v_0.Args[0]
31748 v_0_1 := v_0.Args[1]
31749 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31750 if v_0_0.Op != OpAMD64SHLQ {
31751 continue
31752 }
31753 x := v_0_0.Args[1]
31754 v_0_0_0 := v_0_0.Args[0]
31755 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
31756 continue
31757 }
31758 y := v_0_1
31759 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
31760 v0.AddArg2(x, y)
31761 b.resetWithControl(BlockAMD64ULT, v0)
31762 return true
31763 }
31764 break
31765 }
31766
31767
31768
31769 for b.Controls[0].Op == OpAMD64TESTLconst {
31770 v_0 := b.Controls[0]
31771 c := auxIntToInt32(v_0.AuxInt)
31772 x := v_0.Args[0]
31773 if !(isUint32PowerOfTwo(int64(c))) {
31774 break
31775 }
31776 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31777 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31778 v0.AddArg(x)
31779 b.resetWithControl(BlockAMD64ULT, v0)
31780 return true
31781 }
31782
31783
31784
31785 for b.Controls[0].Op == OpAMD64TESTQconst {
31786 v_0 := b.Controls[0]
31787 c := auxIntToInt32(v_0.AuxInt)
31788 x := v_0.Args[0]
31789 if !(isUint64PowerOfTwo(int64(c))) {
31790 break
31791 }
31792 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31793 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31794 v0.AddArg(x)
31795 b.resetWithControl(BlockAMD64ULT, v0)
31796 return true
31797 }
31798
31799
31800
31801 for b.Controls[0].Op == OpAMD64TESTQ {
31802 v_0 := b.Controls[0]
31803 _ = v_0.Args[1]
31804 v_0_0 := v_0.Args[0]
31805 v_0_1 := v_0.Args[1]
31806 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31807 if v_0_0.Op != OpAMD64MOVQconst {
31808 continue
31809 }
31810 c := auxIntToInt64(v_0_0.AuxInt)
31811 x := v_0_1
31812 if !(isUint64PowerOfTwo(c)) {
31813 continue
31814 }
31815 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31816 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
31817 v0.AddArg(x)
31818 b.resetWithControl(BlockAMD64ULT, v0)
31819 return true
31820 }
31821 break
31822 }
31823
31824
31825
31826 for b.Controls[0].Op == OpAMD64TESTQ {
31827 v_0 := b.Controls[0]
31828 _ = v_0.Args[1]
31829 v_0_0 := v_0.Args[0]
31830 v_0_1 := v_0.Args[1]
31831 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31832 z1 := v_0_0
31833 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
31834 continue
31835 }
31836 z1_0 := z1.Args[0]
31837 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31838 continue
31839 }
31840 x := z1_0.Args[0]
31841 z2 := v_0_1
31842 if !(z1 == z2) {
31843 continue
31844 }
31845 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31846 v0.AuxInt = int8ToAuxInt(63)
31847 v0.AddArg(x)
31848 b.resetWithControl(BlockAMD64ULT, v0)
31849 return true
31850 }
31851 break
31852 }
31853
31854
31855
31856 for b.Controls[0].Op == OpAMD64TESTL {
31857 v_0 := b.Controls[0]
31858 _ = v_0.Args[1]
31859 v_0_0 := v_0.Args[0]
31860 v_0_1 := v_0.Args[1]
31861 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31862 z1 := v_0_0
31863 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
31864 continue
31865 }
31866 z1_0 := z1.Args[0]
31867 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31868 continue
31869 }
31870 x := z1_0.Args[0]
31871 z2 := v_0_1
31872 if !(z1 == z2) {
31873 continue
31874 }
31875 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31876 v0.AuxInt = int8ToAuxInt(31)
31877 v0.AddArg(x)
31878 b.resetWithControl(BlockAMD64ULT, v0)
31879 return true
31880 }
31881 break
31882 }
31883
31884
31885
31886 for b.Controls[0].Op == OpAMD64TESTQ {
31887 v_0 := b.Controls[0]
31888 _ = v_0.Args[1]
31889 v_0_0 := v_0.Args[0]
31890 v_0_1 := v_0.Args[1]
31891 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31892 z1 := v_0_0
31893 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31894 continue
31895 }
31896 z1_0 := z1.Args[0]
31897 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31898 continue
31899 }
31900 x := z1_0.Args[0]
31901 z2 := v_0_1
31902 if !(z1 == z2) {
31903 continue
31904 }
31905 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31906 v0.AuxInt = int8ToAuxInt(0)
31907 v0.AddArg(x)
31908 b.resetWithControl(BlockAMD64ULT, v0)
31909 return true
31910 }
31911 break
31912 }
31913
31914
31915
31916 for b.Controls[0].Op == OpAMD64TESTL {
31917 v_0 := b.Controls[0]
31918 _ = v_0.Args[1]
31919 v_0_0 := v_0.Args[0]
31920 v_0_1 := v_0.Args[1]
31921 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31922 z1 := v_0_0
31923 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31924 continue
31925 }
31926 z1_0 := z1.Args[0]
31927 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31928 continue
31929 }
31930 x := z1_0.Args[0]
31931 z2 := v_0_1
31932 if !(z1 == z2) {
31933 continue
31934 }
31935 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31936 v0.AuxInt = int8ToAuxInt(0)
31937 v0.AddArg(x)
31938 b.resetWithControl(BlockAMD64ULT, v0)
31939 return true
31940 }
31941 break
31942 }
31943
31944
31945
31946 for b.Controls[0].Op == OpAMD64TESTQ {
31947 v_0 := b.Controls[0]
31948 _ = v_0.Args[1]
31949 v_0_0 := v_0.Args[0]
31950 v_0_1 := v_0.Args[1]
31951 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31952 z1 := v_0_0
31953 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31954 continue
31955 }
31956 x := z1.Args[0]
31957 z2 := v_0_1
31958 if !(z1 == z2) {
31959 continue
31960 }
31961 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31962 v0.AuxInt = int8ToAuxInt(63)
31963 v0.AddArg(x)
31964 b.resetWithControl(BlockAMD64ULT, v0)
31965 return true
31966 }
31967 break
31968 }
31969
31970
31971
31972 for b.Controls[0].Op == OpAMD64TESTL {
31973 v_0 := b.Controls[0]
31974 _ = v_0.Args[1]
31975 v_0_0 := v_0.Args[0]
31976 v_0_1 := v_0.Args[1]
31977 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31978 z1 := v_0_0
31979 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31980 continue
31981 }
31982 x := z1.Args[0]
31983 z2 := v_0_1
31984 if !(z1 == z2) {
31985 continue
31986 }
31987 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31988 v0.AuxInt = int8ToAuxInt(31)
31989 v0.AddArg(x)
31990 b.resetWithControl(BlockAMD64ULT, v0)
31991 return true
31992 }
31993 break
31994 }
31995
31996
31997 for b.Controls[0].Op == OpAMD64TESTB {
31998 v_0 := b.Controls[0]
31999 _ = v_0.Args[1]
32000 v_0_0 := v_0.Args[0]
32001 if v_0_0.Op != OpAMD64SETGF {
32002 break
32003 }
32004 cmp := v_0_0.Args[0]
32005 v_0_1 := v_0.Args[1]
32006 if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
32007 break
32008 }
32009 b.resetWithControl(BlockAMD64UGT, cmp)
32010 return true
32011 }
32012
32013
32014 for b.Controls[0].Op == OpAMD64TESTB {
32015 v_0 := b.Controls[0]
32016 _ = v_0.Args[1]
32017 v_0_0 := v_0.Args[0]
32018 if v_0_0.Op != OpAMD64SETGEF {
32019 break
32020 }
32021 cmp := v_0_0.Args[0]
32022 v_0_1 := v_0.Args[1]
32023 if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
32024 break
32025 }
32026 b.resetWithControl(BlockAMD64UGE, cmp)
32027 return true
32028 }
32029
32030
32031 for b.Controls[0].Op == OpAMD64TESTB {
32032 v_0 := b.Controls[0]
32033 _ = v_0.Args[1]
32034 v_0_0 := v_0.Args[0]
32035 if v_0_0.Op != OpAMD64SETEQF {
32036 break
32037 }
32038 cmp := v_0_0.Args[0]
32039 v_0_1 := v_0.Args[1]
32040 if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
32041 break
32042 }
32043 b.resetWithControl(BlockAMD64EQF, cmp)
32044 return true
32045 }
32046
32047
32048 for b.Controls[0].Op == OpAMD64TESTB {
32049 v_0 := b.Controls[0]
32050 _ = v_0.Args[1]
32051 v_0_0 := v_0.Args[0]
32052 if v_0_0.Op != OpAMD64SETNEF {
32053 break
32054 }
32055 cmp := v_0_0.Args[0]
32056 v_0_1 := v_0.Args[1]
32057 if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
32058 break
32059 }
32060 b.resetWithControl(BlockAMD64NEF, cmp)
32061 return true
32062 }
32063
32064
32065 for b.Controls[0].Op == OpAMD64InvertFlags {
32066 v_0 := b.Controls[0]
32067 cmp := v_0.Args[0]
32068 b.resetWithControl(BlockAMD64NE, cmp)
32069 return true
32070 }
32071
32072
32073 for b.Controls[0].Op == OpAMD64FlagEQ {
32074 b.Reset(BlockFirst)
32075 b.swapSuccessors()
32076 return true
32077 }
32078
32079
32080 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
32081 b.Reset(BlockFirst)
32082 return true
32083 }
32084
32085
32086 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
32087 b.Reset(BlockFirst)
32088 return true
32089 }
32090
32091
32092 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
32093 b.Reset(BlockFirst)
32094 return true
32095 }
32096
32097
32098 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
32099 b.Reset(BlockFirst)
32100 return true
32101 }
32102
32103
32104 for b.Controls[0].Op == OpAMD64TESTQ {
32105 v_0 := b.Controls[0]
32106 _ = v_0.Args[1]
32107 v_0_0 := v_0.Args[0]
32108 v_0_1 := v_0.Args[1]
32109 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
32110 s := v_0_0
32111 if s.Op != OpSelect0 {
32112 continue
32113 }
32114 blsr := s.Args[0]
32115 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
32116 continue
32117 }
32118 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
32119 v0.AddArg(blsr)
32120 b.resetWithControl(BlockAMD64NE, v0)
32121 return true
32122 }
32123 break
32124 }
32125
32126
32127 for b.Controls[0].Op == OpAMD64TESTL {
32128 v_0 := b.Controls[0]
32129 _ = v_0.Args[1]
32130 v_0_0 := v_0.Args[0]
32131 v_0_1 := v_0.Args[1]
32132 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
32133 s := v_0_0
32134 if s.Op != OpSelect0 {
32135 continue
32136 }
32137 blsr := s.Args[0]
32138 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
32139 continue
32140 }
32141 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
32142 v0.AddArg(blsr)
32143 b.resetWithControl(BlockAMD64NE, v0)
32144 return true
32145 }
32146 break
32147 }
32148 case BlockAMD64UGE:
32149
32150
32151 for b.Controls[0].Op == OpAMD64TESTQ {
32152 v_0 := b.Controls[0]
32153 x := v_0.Args[1]
32154 if x != v_0.Args[0] {
32155 break
32156 }
32157 b.Reset(BlockFirst)
32158 return true
32159 }
32160
32161
32162 for b.Controls[0].Op == OpAMD64TESTL {
32163 v_0 := b.Controls[0]
32164 x := v_0.Args[1]
32165 if x != v_0.Args[0] {
32166 break
32167 }
32168 b.Reset(BlockFirst)
32169 return true
32170 }
32171
32172
32173 for b.Controls[0].Op == OpAMD64TESTW {
32174 v_0 := b.Controls[0]
32175 x := v_0.Args[1]
32176 if x != v_0.Args[0] {
32177 break
32178 }
32179 b.Reset(BlockFirst)
32180 return true
32181 }
32182
32183
32184 for b.Controls[0].Op == OpAMD64TESTB {
32185 v_0 := b.Controls[0]
32186 x := v_0.Args[1]
32187 if x != v_0.Args[0] {
32188 break
32189 }
32190 b.Reset(BlockFirst)
32191 return true
32192 }
32193
32194
32195 for b.Controls[0].Op == OpAMD64InvertFlags {
32196 v_0 := b.Controls[0]
32197 cmp := v_0.Args[0]
32198 b.resetWithControl(BlockAMD64ULE, cmp)
32199 return true
32200 }
32201
32202
32203 for b.Controls[0].Op == OpAMD64FlagEQ {
32204 b.Reset(BlockFirst)
32205 return true
32206 }
32207
32208
32209 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
32210 b.Reset(BlockFirst)
32211 b.swapSuccessors()
32212 return true
32213 }
32214
32215
32216 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
32217 b.Reset(BlockFirst)
32218 return true
32219 }
32220
32221
32222 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
32223 b.Reset(BlockFirst)
32224 b.swapSuccessors()
32225 return true
32226 }
32227
32228
32229 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
32230 b.Reset(BlockFirst)
32231 return true
32232 }
32233 case BlockAMD64UGT:
32234
32235
32236 for b.Controls[0].Op == OpAMD64InvertFlags {
32237 v_0 := b.Controls[0]
32238 cmp := v_0.Args[0]
32239 b.resetWithControl(BlockAMD64ULT, cmp)
32240 return true
32241 }
32242
32243
32244 for b.Controls[0].Op == OpAMD64FlagEQ {
32245 b.Reset(BlockFirst)
32246 b.swapSuccessors()
32247 return true
32248 }
32249
32250
32251 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
32252 b.Reset(BlockFirst)
32253 b.swapSuccessors()
32254 return true
32255 }
32256
32257
32258 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
32259 b.Reset(BlockFirst)
32260 return true
32261 }
32262
32263
32264 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
32265 b.Reset(BlockFirst)
32266 b.swapSuccessors()
32267 return true
32268 }
32269
32270
32271 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
32272 b.Reset(BlockFirst)
32273 return true
32274 }
32275 case BlockAMD64ULE:
32276
32277
32278 for b.Controls[0].Op == OpAMD64InvertFlags {
32279 v_0 := b.Controls[0]
32280 cmp := v_0.Args[0]
32281 b.resetWithControl(BlockAMD64UGE, cmp)
32282 return true
32283 }
32284
32285
32286 for b.Controls[0].Op == OpAMD64FlagEQ {
32287 b.Reset(BlockFirst)
32288 return true
32289 }
32290
32291
32292 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
32293 b.Reset(BlockFirst)
32294 return true
32295 }
32296
32297
32298 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
32299 b.Reset(BlockFirst)
32300 b.swapSuccessors()
32301 return true
32302 }
32303
32304
32305 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
32306 b.Reset(BlockFirst)
32307 return true
32308 }
32309
32310
32311 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
32312 b.Reset(BlockFirst)
32313 b.swapSuccessors()
32314 return true
32315 }
32316 case BlockAMD64ULT:
32317
32318
32319 for b.Controls[0].Op == OpAMD64TESTQ {
32320 v_0 := b.Controls[0]
32321 x := v_0.Args[1]
32322 if x != v_0.Args[0] {
32323 break
32324 }
32325 b.Reset(BlockFirst)
32326 b.swapSuccessors()
32327 return true
32328 }
32329
32330
32331 for b.Controls[0].Op == OpAMD64TESTL {
32332 v_0 := b.Controls[0]
32333 x := v_0.Args[1]
32334 if x != v_0.Args[0] {
32335 break
32336 }
32337 b.Reset(BlockFirst)
32338 b.swapSuccessors()
32339 return true
32340 }
32341
32342
32343 for b.Controls[0].Op == OpAMD64TESTW {
32344 v_0 := b.Controls[0]
32345 x := v_0.Args[1]
32346 if x != v_0.Args[0] {
32347 break
32348 }
32349 b.Reset(BlockFirst)
32350 b.swapSuccessors()
32351 return true
32352 }
32353
32354
32355 for b.Controls[0].Op == OpAMD64TESTB {
32356 v_0 := b.Controls[0]
32357 x := v_0.Args[1]
32358 if x != v_0.Args[0] {
32359 break
32360 }
32361 b.Reset(BlockFirst)
32362 b.swapSuccessors()
32363 return true
32364 }
32365
32366
32367 for b.Controls[0].Op == OpAMD64InvertFlags {
32368 v_0 := b.Controls[0]
32369 cmp := v_0.Args[0]
32370 b.resetWithControl(BlockAMD64UGT, cmp)
32371 return true
32372 }
32373
32374
32375 for b.Controls[0].Op == OpAMD64FlagEQ {
32376 b.Reset(BlockFirst)
32377 b.swapSuccessors()
32378 return true
32379 }
32380
32381
32382 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
32383 b.Reset(BlockFirst)
32384 return true
32385 }
32386
32387
32388 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
32389 b.Reset(BlockFirst)
32390 b.swapSuccessors()
32391 return true
32392 }
32393
32394
32395 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
32396 b.Reset(BlockFirst)
32397 return true
32398 }
32399
32400
32401 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
32402 b.Reset(BlockFirst)
32403 b.swapSuccessors()
32404 return true
32405 }
32406 }
32407 return false
32408 }
32409