// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg -sroa -early-cse | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg -sroa -early-cse | FileCheck %s

#include <arm_mve.h>
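
// This file checks IR generation for the MVE predicate intrinsics: the VCTP
// (Create Vector Tail Predicate) family vctp8q/vctp16q/vctp32q/vctp64q and
// their _m (merging) variants, vpnot, and vpselq over each element type. The
// second RUN line (-DPOLYMORPHIC) also exercises the overloaded vpselq form.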

// CHECK-LABEL: @test_vctp16q(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT:    ret i16 [[TMP2]]
//
mve_pred16_t test_vctp16q(uint32_t a)
{
    return vctp16q(a);
}

// CHECK-LABEL: @test_vctp16q_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
// CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT:    ret i16 [[TMP5]]
//
mve_pred16_t test_vctp16q_m(uint32_t a, mve_pred16_t p)
{
    return vctp16q_m(a, p);
}

// CHECK-LABEL: @test_vctp32q(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT:    ret i16 [[TMP2]]
//
mve_pred16_t test_vctp32q(uint32_t a)
{
    return vctp32q(a);
}

// CHECK-LABEL: @test_vctp32q_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT:    ret i16 [[TMP5]]
//
mve_pred16_t test_vctp32q_m(uint32_t a, mve_pred16_t p)
{
    return vctp32q_m(a, p);
}

// CHECK-LABEL: @test_vctp64q(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i1> @llvm.arm.mve.vctp64(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT:    ret i16 [[TMP2]]
//
mve_pred16_t test_vctp64q(uint32_t a)
{
    return vctp64q(a);
}

// CHECK-LABEL: @test_vctp64q_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.vctp64(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
// CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT:    ret i16 [[TMP5]]
//
mve_pred16_t test_vctp64q_m(uint32_t a, mve_pred16_t p)
{
    return vctp64q_m(a, p);
}

// CHECK-LABEL: @test_vctp8q(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i1> @llvm.arm.mve.vctp8(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT:    ret i16 [[TMP2]]
//
mve_pred16_t test_vctp8q(uint32_t a)
{
    return vctp8q(a);
}

// CHECK-LABEL: @test_vctp8q_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.vctp8(i32 [[A:%.*]])
// CHECK-NEXT:    [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
// CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
// CHECK-NEXT:    ret i16 [[TMP5]]
//
mve_pred16_t test_vctp8q_m(uint32_t a, mve_pred16_t p)
{
    return vctp8q_m(a, p);
}

// CHECK-LABEL: @test_vpnot(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor i16 [[A:%.*]], -1
// CHECK-NEXT:    ret i16 [[TMP0]]
//
mve_pred16_t test_vpnot(mve_pred16_t a)
{
    return vpnot(a);
}

// CHECK-LABEL: @test_vpselq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]]
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vpselq_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vpselq_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vpselq_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vpselq_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_s64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> [[TMP3]]
// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <2 x i64>
// CHECK-NEXT:    ret <2 x i64> [[TMP5]]
//
int64x2_t test_vpselq_s64(int64x2_t a, int64x2_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_s64(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vpselq_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vpselq_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_u16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vpselq_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> [[TMP3]]
// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <2 x i64>
// CHECK-NEXT:    ret <2 x i64> [[TMP5]]
//
uint64x2_t test_vpselq_u64(uint64x2_t a, uint64x2_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_u64(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vpselq_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vpselq_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vpselq(a, b, p);
#else /* POLYMORPHIC */
    return vpselq_u8(a, b, p);
#endif /* POLYMORPHIC */
}