; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

;
; testz(~X,Y) -> testc(X,Y)
;
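; vtestpd sets ZF when the sign bits of (X & Y) are all zero and CF when the
; sign bits of (~X & Y) are all zero, so a testz whose first operand is
; inverted can be read from CF instead of ZF. The combine happens on the DAG,
; but its effect is roughly that of calling
; @llvm.x86.avx.vtestc.pd(<2 x double> %c, <2 x double> %d) directly, which is
; why the cmovs below key on AE (CF == 0) rather than NE (ZF == 0).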

define i32 @testpdz_128_invert0(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_128_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %xmm1, %xmm0
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpdz_256_invert0(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_256_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %ymm1, %ymm0
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t2, <4 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

;
; testz(X,~Y) -> testc(Y,X)
;
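; With the second operand inverted, testz(X,~Y) tests the sign bits of
; (X & ~Y), which is the CF condition of vtestpd with the operands swapped,
; i.e. testc(Y,X). The checks below therefore expect the operand order to be
; reversed (vtestpd %xmm0, %xmm1 / %ymm0, %ymm1) together with the AE
; (CF == 0) cmov condition.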

define i32 @testpdz_128_invert1(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_128_invert1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %xmm0, %xmm1
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <2 x double> %d to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %c, <2 x double> %t2)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpdz_256_invert1(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdz_256_invert1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %ymm0, %ymm1
; CHECK-NEXT:    cmovael %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x double> %d to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %c, <4 x double> %t2)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

;
; testc(~X,Y) -> testz(X,Y)
;
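; testc(A,B) is set when no sign bits of (~A & B) are set; with A = ~X that
; condition becomes (X & Y), which is exactly testz(X,Y). The cmovs below
; accordingly switch from AE (CF == 0) to NE (ZF == 0).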

define i32 @testpdc_128_invert0(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdc_128_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %xmm1, %xmm0
; CHECK-NEXT:    cmovnel %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpdc_256_invert0(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdc_256_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %ymm1, %ymm0
; CHECK-NEXT:    cmovnel %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %t2, <4 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

;
; testnzc(~X,Y) -> testnzc(X,Y)
;
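; testnzc is ZF == 0 && CF == 0, and inverting the first vtestpd operand only
; swaps the roles of ZF and CF, so the NOT can simply be dropped without
; changing the result. The cmovbel below selects %b whenever ZF or CF is set
; (BE), leaving %a for the strictly-above case.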

define i32 @testpdnzc_128_invert0(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdnzc_128_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %xmm1, %xmm0
; CHECK-NEXT:    cmovbel %esi, %eax
; CHECK-NEXT:    retq
  %t0 = bitcast <2 x double> %c to <2 x i64>
  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
  %t2 = bitcast <2 x i64> %t1 to <2 x double>
  %t3 = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %t2, <2 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

define i32 @testpdnzc_256_invert0(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
; CHECK-LABEL: testpdnzc_256_invert0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    vtestpd %ymm1, %ymm0
; CHECK-NEXT:    cmovbel %esi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %t0 = bitcast <4 x double> %c to <4 x i64>
  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %t2 = bitcast <4 x i64> %t1 to <4 x double>
  %t3 = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %t2, <4 x double> %d)
  %t4 = icmp ne i32 %t3, 0
  %t5 = select i1 %t4, i32 %a, i32 %b
  ret i32 %t5
}

declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnone
declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnone
declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readnone

declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind readnone
declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind readnone
declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind readnone