; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+sse | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+sse | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+avx512f | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=X64-AVX
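; The X64-SSE and X64-AVX prefixes check the 64-bit SSE and AVX runs (the
; AVX-512 runs reuse the X64-AVX prefix); the X32 prefix checks the 32-bit run.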
@vi16 = common global i16 0, align 2
@vi32 = common global i32 0, align 4
@vi64 = common global i64 0, align 8
@vi128 = common global i128 0, align 16
@vu32 = common global i32 0, align 4
@vu64 = common global i64 0, align 8
@vu128 = common global i128 0, align 16
@vf32 = common global float 0.000000e+00, align 4
@vf64 = common global double 0.000000e+00, align 8
@vf80 = common global x86_fp80 0xK00000000000000000000, align 8
@vf128 = common global fp128 0xL00000000000000000000000000000000, align 16
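; These globals are the source and destination slots for the conversion tests
; below, so each test reduces to a bare load/convert/store of one value.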
; X64-SSE-LABEL: TestFPExtF32_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSE-NEXT: callq __extendsftf2
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: fstps {{[0-9]+}}(%esp)
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestFPExtF32_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT: callq __extendsftf2
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load float, float* @vf32, align 4
  %conv = fpext float %0 to fp128
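; Every conversion test below has this load/convert/store shape; the tail of
; this function presumably stores %conv and returns:
;   store fp128 %conv, fp128* @vf128, align 16
;   ret void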
; X64-SSE-LABEL: TestFPExtF64_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT: callq __extenddftf2
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: fstpl {{[0-9]+}}(%esp)
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestFPExtF64_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX-NEXT: callq __extenddftf2
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load double, double* @vf64, align 8
  %conv = fpext double %0 to fp128
; X64-SSE-LABEL: TestFPExtF80_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: subq $24, %rsp
; X64-SSE-NEXT: fldt {{.*}}(%rip)
; X64-SSE-NEXT: fstpt (%rsp)
; X64-SSE-NEXT: callq __extendxftf2
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: addq $24, %rsp
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: fstpt {{[0-9]+}}(%esp)
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestFPExtF80_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: subq $24, %rsp
; X64-AVX-NEXT: fldt {{.*}}(%rip)
; X64-AVX-NEXT: fstpt (%rsp)
; X64-AVX-NEXT: callq __extendxftf2
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: addq $24, %rsp
; X64-AVX-NEXT: retq
  %0 = load x86_fp80, x86_fp80* @vf80, align 8
  %conv = fpext x86_fp80 %0 to fp128
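; The x86_fp80 argument of __extendxftf2 is passed in memory: the value is
; loaded onto the x87 stack with fldt and spilled to the call frame with fstpt.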
; X64-SSE-LABEL: TestFPToSIF128_I16:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixtfsi
; X64-SSE-NEXT: movw %ax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPToSIF128_I16:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixtfsi
; X64-AVX-NEXT: movw %ax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i16
; X64-SSE-LABEL: TestFPToUIF128_I16:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixtfsi
; X64-SSE-NEXT: movw %ax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPToUIF128_I16:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixtfsi
; X64-AVX-NEXT: movw %ax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i16
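; Both the signed and the unsigned i16 conversions call the signed __fixtfsi:
; every i16 result fits in the signed i32 return value, so only the low 16
; bits (movw %ax) are stored.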
; X64-SSE-LABEL: TestFPToSIF128_I32:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixtfsi
; X64-SSE-NEXT: movl %eax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPToSIF128_I32:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixtfsi
; X64-AVX-NEXT: movl %eax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i32
; X64-SSE-LABEL: TestFPToUIF128_U32:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixunstfsi
; X64-SSE-NEXT: movl %eax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPToUIF128_U32:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixunstfsi
; X64-AVX-NEXT: movl %eax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i32
; X64-SSE-LABEL: TestFPToSIF128_I64:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixtfsi
; X64-SSE-NEXT: cltq
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPToSIF128_I64:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixtfsi
; X64-AVX-NEXT: cltq
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i32
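; The I64 variant converts to i32 and presumably sign-extends the elided
; result to i64, which is why the checks show __fixtfsi followed by cltq
; rather than a call to __fixtfdi.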
; X64-SSE-LABEL: TestFPToUIF128_U64:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixunstfsi
; X64-SSE-NEXT: movl %eax, %eax
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: movl $0, vu64+4
; X64-AVX-LABEL: TestFPToUIF128_U64:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixunstfsi
; X64-AVX-NEXT: movl %eax, %eax
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i32
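; Likewise the U64 variant zero-extends the unsigned i32 result; the
; "movl %eax, %eax" clears the upper 32 bits before the 64-bit store.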
; X64-SSE-LABEL: TestFPToSIF128_I128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixtfti
; X64-SSE-NEXT: movq %rdx, vi128+{{.*}}(%rip)
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestFPToSIF128_I128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixtfti
; X64-AVX-NEXT: movq %rdx, vi128+{{.*}}(%rip)
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptosi fp128 %0 to i128
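; The i128 result of __fixtfti comes back in the %rdx:%rax pair, hence the two
; 64-bit stores to vi128.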
; X64-SSE-LABEL: TestFPToUIF128_U128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __fixunstfti
; X64-SSE-NEXT: movq %rdx, vu128+{{.*}}(%rip)
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestFPToUIF128_U128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __fixunstfti
; X64-AVX-NEXT: movq %rdx, vu128+{{.*}}(%rip)
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptoui fp128 %0 to i128
; X64-SSE-LABEL: TestFPTruncF128_F32:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __trunctfsf2
; X64-SSE-NEXT: movss %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPTruncF128_F32:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __trunctfsf2
; X64-AVX-NEXT: vmovss %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to float
; X64-SSE-LABEL: TestFPTruncF128_F64:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __trunctfdf2
; X64-SSE-NEXT: movsd %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPTruncF128_F64:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __trunctfdf2
; X64-AVX-NEXT: vmovsd %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to double
; X64-SSE-LABEL: TestFPTruncF128_F80:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __trunctfxf2
; X64-SSE-NEXT: fstpt {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X64-AVX-LABEL: TestFPTruncF128_F80:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
; X64-AVX-NEXT: callq __trunctfxf2
; X64-AVX-NEXT: fstpt {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load fp128, fp128* @vf128, align 16
  %conv = fptrunc fp128 %0 to x86_fp80
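; __trunctfxf2 returns its x86_fp80 result on the x87 stack, so the store to
; vf80 uses fstpt rather than an SSE move.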
; X64-SSE-LABEL: TestSIToFPI16_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movswl {{.*}}(%rip), %edi
; X64-SSE-NEXT: callq __floatsitf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestSIToFPI16_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movswl {{.*}}(%rip), %edi
; X64-AVX-NEXT: callq __floatsitf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i16, i16* @vi16, align 4
  %conv = sitofp i16 %0 to fp128
; X64-SSE-LABEL: TestSIToFPU16_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movzwl {{.*}}(%rip), %edi
; X64-SSE-NEXT: callq __floatsitf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestSIToFPU16_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movzwl {{.*}}(%rip), %edi
; X64-AVX-NEXT: callq __floatsitf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i16, i16* @vi16, align 4
  %conv = uitofp i16 %0 to fp128
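; uitofp from i16 also goes through the signed __floatsitf: the operand is
; zero-extended with movzwl, and the resulting i32 is always non-negative.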
; X64-SSE-LABEL: TestSIToFPI32_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movl {{.*}}(%rip), %edi
; X64-SSE-NEXT: callq __floatsitf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestSIToFPI32_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movl {{.*}}(%rip), %edi
; X64-AVX-NEXT: callq __floatsitf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i32, i32* @vi32, align 4
  %conv = sitofp i32 %0 to fp128
; X64-SSE-LABEL: TestUIToFPU32_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movl {{.*}}(%rip), %edi
; X64-SSE-NEXT: callq __floatunsitf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestUIToFPU32_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movl {{.*}}(%rip), %edi
; X64-AVX-NEXT: callq __floatunsitf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i32, i32* @vu32, align 4
  %conv = uitofp i32 %0 to fp128
; X64-SSE-LABEL: TestSIToFPI64_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
; X64-SSE-NEXT: callq __floatditf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestSIToFPI64_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
; X64-AVX-NEXT: callq __floatditf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i64, i64* @vi64, align 8
  %conv = sitofp i64 %0 to fp128
; X64-SSE-LABEL: TestUIToFPU64_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
; X64-SSE-NEXT: callq __floatunditf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestUIToFPU64_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
; X64-AVX-NEXT: callq __floatunditf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i64, i64* @vu64, align 8
  %conv = uitofp i64 %0 to fp128
; X64-SSE-LABEL: TestSIToFPI128_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
; X64-SSE-NEXT: movq vi128+{{.*}}(%rip), %rsi
; X64-SSE-NEXT: callq __floattitf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestSIToFPI128_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
; X64-AVX-NEXT: movq vi128+{{.*}}(%rip), %rsi
; X64-AVX-NEXT: callq __floattitf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i128, i128* @vi128, align 16
  %conv = sitofp i128 %0 to fp128
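; The i128 argument of __floattitf is passed in registers: the low half of
; vi128 in %rdi and the high half in %rsi.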
; X64-SSE-LABEL: TestUIToFPU128_F128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
; X64-SSE-NEXT: movq vu128+{{.*}}(%rip), %rsi
; X64-SSE-NEXT: callq __floatuntitf
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X64-AVX-LABEL: TestUIToFPU128_F128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
; X64-AVX-NEXT: movq vu128+{{.*}}(%rip), %rsi
; X64-AVX-NEXT: callq __floatuntitf
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
  %0 = load i128, i128* @vu128, align 16
  %conv = uitofp i128 %0 to fp128
; X64-SSE-LABEL: TestConst128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm1
; X64-SSE-NEXT: callq __gttf2
; X64-SSE-NEXT: xorl %ecx, %ecx
; X64-SSE-NEXT: testl %eax, %eax
; X64-SSE-NEXT: setg %cl
; X64-SSE-NEXT: movl %ecx, %eax
; X64-SSE-NEXT: popq %rcx
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: pushl $1073676288 # imm = 0x3FFF0000
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X64-AVX-LABEL: TestConst128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm1
; X64-AVX-NEXT: callq __gttf2
; X64-AVX-NEXT: xorl %ecx, %ecx
; X64-AVX-NEXT: testl %eax, %eax
; X64-AVX-NEXT: setg %cl
; X64-AVX-NEXT: movl %ecx, %eax
; X64-AVX-NEXT: popq %rcx
; X64-AVX-NEXT: retq
  %cmp = fcmp ogt fp128 %v, 0xL00000000000000003FFF000000000000
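; The fp128 constant above is 1.0 (biased exponent 0x3FFF); the ordered
; greater-than compare lowers to __gttf2 plus a check for a positive result
; (testl/setg).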
; X64-SSE-LABEL: TestConst128Zero:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: xorps %xmm1, %xmm1
; X64-SSE-NEXT: callq __gttf2
; X64-SSE-NEXT: xorl %ecx, %ecx
; X64-SSE-NEXT: testl %eax, %eax
; X64-SSE-NEXT: setg %cl
; X64-SSE-NEXT: movl %ecx, %eax
; X64-SSE-NEXT: popq %rcx
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X32-NEXT: pushl {{[0-9]+}}(%esp)
; X64-AVX-LABEL: TestConst128Zero:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: callq __gttf2
; X64-AVX-NEXT: xorl %ecx, %ecx
; X64-AVX-NEXT: testl %eax, %eax
; X64-AVX-NEXT: setg %cl
; X64-AVX-NEXT: movl %ecx, %eax
; X64-AVX-NEXT: popq %rcx
; X64-AVX-NEXT: retq
  %cmp = fcmp ogt fp128 %v, 0xL00000000000000000000000000000000
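; Comparing against fp128 zero still calls __gttf2, but the zero operand is
; materialized with xorps/vxorps instead of a constant-pool load.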
; return ((u.bits.v1 | u.bits.v2) == 0);
; X64-SSE-LABEL: TestBits128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: subq $24, %rsp
; X64-SSE-NEXT: movaps %xmm0, %xmm1
; X64-SSE-NEXT: callq __multf3
; X64-SSE-NEXT: movaps %xmm0, (%rsp)
; X64-SSE-NEXT: movq (%rsp), %rcx
; X64-SSE-NEXT: movq %rcx, %rdx
; X64-SSE-NEXT: shrq $32, %rdx
; X64-SSE-NEXT: xorl %eax, %eax
; X64-SSE-NEXT: orl %ecx, %edx
; X64-SSE-NEXT: sete %al
; X64-SSE-NEXT: addq $24, %rsp
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: leal {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X64-AVX-LABEL: TestBits128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: subq $24, %rsp
; X64-AVX-NEXT: vmovaps %xmm0, %xmm1
; X64-AVX-NEXT: callq __multf3
; X64-AVX-NEXT: vmovaps %xmm0, (%rsp)
; X64-AVX-NEXT: movq (%rsp), %rcx
; X64-AVX-NEXT: movq %rcx, %rdx
; X64-AVX-NEXT: shrq $32, %rdx
; X64-AVX-NEXT: xorl %eax, %eax
; X64-AVX-NEXT: orl %ecx, %edx
; X64-AVX-NEXT: sete %al
; X64-AVX-NEXT: addq $24, %rsp
; X64-AVX-NEXT: retq
  %0 = bitcast fp128 %mul to i128
  %shift = lshr i128 %0, 32
  %or5 = or i128 %shift, %0
  %cmp = icmp eq i32 %or, 0
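; %or is presumably the elided i32 truncation of %or5; after truncation only
; the low 64 bits of the product matter, so the checks load a single 64-bit
; half, shift it, and finish with a 32-bit or.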
; X64-SSE-LABEL: TestPair128:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: addq $3, %rsi
; X64-SSE-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT: adcq $0, %rdi
; X64-SSE-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl $0, %edx
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
; X64-AVX-LABEL: TestPair128:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: addq $3, %rsi
; X64-AVX-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: adcq $0, %rdi
; X64-AVX-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; X64-AVX-NEXT: retq
  %0 = bitcast i128 %add to fp128
  ret fp128 %0
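; The i128 addition of 3 lowers to addq/adcq, and the bitcast to fp128
; round-trips through a 16-byte stack slot that is reloaded into %xmm0.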
; X64-SSE-LABEL: TestTruncCopysign:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cmpl $50001, %edi # imm = 0xC351
; X64-SSE-NEXT: jl .LBB26_2
; X64-SSE-NEXT: # %bb.1: # %if.then
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: callq __trunctfdf2
; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: orps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: callq __extenddftf2
; X64-SSE-NEXT: addq $8, %rsp
; X64-SSE-NEXT: .LBB26_2: # %cleanup
; X64-SSE-NEXT: retq
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: cmpl $50001, {{[0-9]+}}(%esp) # imm = 0xC351
; X32-NEXT: fstpl {{[0-9]+}}(%esp)
; X32-NEXT: testb $-128, {{[0-9]+}}(%esp)
; X32-NEXT: fstp %st(0)
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: fstpl {{[0-9]+}}(%esp)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X64-AVX-LABEL: TestTruncCopysign:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: cmpl $50001, %edi # imm = 0xC351
; X64-AVX-NEXT: jl .LBB26_2
; X64-AVX-NEXT: # %bb.1: # %if.then
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: callq __trunctfdf2
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovddup {{.*#+}} xmm1 = [+Inf,+Inf]
; X64-AVX-NEXT: # xmm1 = mem[0,0]
; X64-AVX-NEXT: vorps %xmm0, %xmm1, %xmm0
; X64-AVX-NEXT: callq __extenddftf2
; X64-AVX-NEXT: addq $8, %rsp
; X64-AVX-NEXT: .LBB26_2: # %cleanup
; X64-AVX-NEXT: retq
  %call = tail call double @copysign(double 0x7FF0000000000000, double %conv) #2
  %retval.0 = phi fp128 [ %conv1, %if.then ], [ %x, %entry ]
  ret fp128 %retval.0
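; copysign(+Inf, %conv) is lowered inline: andps keeps the sign bit of the
; truncated double and orps inserts the +Inf payload (the AVX run materializes
; the constant with vmovddup).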
; X64-SSE-LABEL: PR34866:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm0, %xmm0
; X64-SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
; X64-SSE-NEXT: xorq -{{[0-9]+}}(%rsp), %rdi
; X64-SSE-NEXT: orq %rsi, %rdi
; X64-SSE-NEXT: sete %al
; X64-SSE-NEXT: retq
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
; X64-AVX-LABEL: PR34866:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
; X64-AVX-NEXT: xorq -{{[0-9]+}}(%rsp), %rdi
; X64-AVX-NEXT: orq %rsi, %rdi
; X64-AVX-NEXT: sete %al
; X64-AVX-NEXT: retq
  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
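; Equality against bitcast fp128 zero is checked by xor'ing both i128 halves
; with a zeroed stack slot and or'ing the results into a single sete.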
; X64-SSE-LABEL: PR34866_commute:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: xorps %xmm0, %xmm0
; X64-SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-SSE-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
; X64-SSE-NEXT: xorq -{{[0-9]+}}(%rsp), %rdi
; X64-SSE-NEXT: orq %rsi, %rdi
; X64-SSE-NEXT: sete %al
; X64-SSE-NEXT: retq
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
; X64-AVX-LABEL: PR34866_commute:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-AVX-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
; X64-AVX-NEXT: xorq -{{[0-9]+}}(%rsp), %rdi
; X64-AVX-NEXT: orq %rsi, %rdi
; X64-AVX-NEXT: sete %al
; X64-AVX-NEXT: retq
  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
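; As the identical checks above show, commuting the compare operands in
; PR34866_commute produces the same code as PR34866.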