; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx                    | FileCheck %s --check-prefix=AVX

; Although AVX (and some SSE implementations, under special conditions) can
; fold an unaligned load into a packed op, the loads in these test cases can
; never be folded because they are not 16-byte loads. Each load must be
; executed as a scalar ('movs*') with zero extension to 128 bits and only
; then used in the packed logical ('andp*') op.
; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371
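;
; For contrast, a hedged sketch in AT&T syntax (illustrative only, not
; checked by FileCheck; the address register is assumed): a full 16-byte
; memory operand could fold directly into the logical op,
;
;   andpd (%rax), %xmm1        ; andpd accepts xmm/m128, so a 128-bit load folds
;
; whereas the narrow constant-pool loads checked below must stay separate:
;
;   movsd (%rax), %xmm0        ; 8-byte scalar load; bits 127:64 are zeroed
;   andpd %xmm1, %xmm0         ; packed logical op on registers only
;
; since folding an 8-byte (or 4-byte) operand would read a full 16 bytes.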

define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
; SSE2:       BB#0:
; SSE2-NEXT:    cmplesd %xmm0, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    andpd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_double_no_fold:
; AVX:       BB#0:
; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %cmp = fcmp oge double %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to double
  ret double %conv
}

define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
; SSE2:       BB#0:
; SSE2-NEXT:    cmpless %xmm0, %xmm1
; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    andps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: load_float_no_fold:
; AVX:       BB#0:
; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq

  %cmp = fcmp oge float %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to float
  ret float %conv
}