// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -faltivec -target-feature +vsx -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE

vector unsigned char vuc = { 8, 9, 10, 11, 12, 13, 14, 15,
                             0, 1, 2, 3, 4, 5, 6, 7 };
vector float vf = { -1.5, 2.5, -3.5, 4.5 };
vector double vd = { 3.5, -7.5 };
vector signed int vsi = { -1, 2, -3, 4 };
vector unsigned int vui = { 0, 1, 2, 3 };
vector bool long long vbll = { 1, 0 };
vector signed long long vsll = { 255LL, -937LL };
vector unsigned long long vull = { 1447LL, 2894LL };
double d = 23.4;

vector float res_vf;
vector double res_vd;
vector signed int res_vsi;
vector unsigned int res_vui;
vector bool int res_vbi;
vector bool long long res_vbll;
vector signed long long res_vsll;
vector unsigned long long res_vull;
double res_d;

void dummy() { }

void test1() {
// CHECK-LABEL: define void @test1
// CHECK-LE-LABEL: define void @test1

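  /* vec_add */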
  res_vd = vec_add(vd, vd);
// CHECK: fadd <2 x double>
// CHECK-LE: fadd <2 x double>

  res_vd = vec_and(vbll, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  res_vd = vec_and(vd, vbll);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  res_vd = vec_and(vd, vd);
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vbll, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vd, vbll);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_andc(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>
// CHECK-LE: bitcast <2 x double> %{{[0-9]*}} to <2 x i64>
// CHECK-LE: xor <2 x i64> %{{[0-9]*}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]*}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

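  /* vec_ceil */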
  res_vd = vec_ceil(vd);
// CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{[0-9]*}})

  res_vf = vec_ceil(vf);
// CHECK: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x float> @llvm.ceil.v4f32(<4 x float> %{{[0-9]*}})

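  /* vec_cmpeq */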
  res_vbll = vec_cmpeq(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpeq(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

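  /* vec_cmpge */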
  res_vbll = vec_cmpge(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpge(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

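  /* vec_cmpgt */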
  res_vbll = vec_cmpgt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmpgt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

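  /* vec_cmple */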
  res_vbll = vec_cmple(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmple(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

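  /* vec_cmplt */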
  res_vbll = vec_cmplt(vd, vd);
// CHECK: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %{{[0-9]*}}, <2 x double> %{{[0-9]*}})

  res_vbi = vec_cmplt(vf, vf);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %{{[0-9]*}}, <4 x float> %{{[0-9]*}})

  /* vec_cpsgn */
  res_vf = vec_cpsgn(vf, vf);
// CHECK: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})
// CHECK-LE: call <4 x float> @llvm.copysign.v4f32(<4 x float> %{{.+}}, <4 x float> %{{.+}})

  res_vd = vec_cpsgn(vd, vd);
// CHECK: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})
// CHECK-LE: call <2 x double> @llvm.copysign.v2f64(<2 x double> %{{.+}}, <2 x double> %{{.+}})

  /* vec_div */
  res_vsll = vec_div(vsll, vsll);
// CHECK: sdiv <2 x i64>
// CHECK-LE: sdiv <2 x i64>

  res_vull = vec_div(vull, vull);
// CHECK: udiv <2 x i64>
// CHECK-LE: udiv <2 x i64>

  res_vf = vec_div(vf, vf);
// CHECK: fdiv <4 x float>
// CHECK-LE: fdiv <4 x float>

  res_vd = vec_div(vd, vd);
// CHECK: fdiv <2 x double>
// CHECK-LE: fdiv <2 x double>

  /* vec_max */
  res_vf = vec_max(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
// CHECK-LE: @llvm.ppc.vsx.xvmaxsp

  res_vd = vec_max(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmaxdp
// CHECK-LE: @llvm.ppc.vsx.xvmaxdp

  res_vf = vec_vmaxfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvmaxsp
// CHECK-LE: @llvm.ppc.vsx.xvmaxsp

  /* vec_min */
  res_vf = vec_min(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
// CHECK-LE: @llvm.ppc.vsx.xvminsp

  res_vd = vec_min(vd, vd);
// CHECK: @llvm.ppc.vsx.xvmindp
// CHECK-LE: @llvm.ppc.vsx.xvmindp

  res_vf = vec_vminfp(vf, vf);
// CHECK: @llvm.ppc.vsx.xvminsp
// CHECK-LE: @llvm.ppc.vsx.xvminsp

  res_d = __builtin_vsx_xsmaxdp(d, d);
// CHECK: @llvm.ppc.vsx.xsmaxdp
// CHECK-LE: @llvm.ppc.vsx.xsmaxdp

  res_d = __builtin_vsx_xsmindp(d, d);
// CHECK: @llvm.ppc.vsx.xsmindp
// CHECK-LE: @llvm.ppc.vsx.xsmindp

  /* vec_perm */
  res_vsll = vec_perm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_perm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbll = vec_perm(vbll, vbll, vuc);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

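  /* vec_round */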
  res_vf = vec_round(vf);
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float>
// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float>

  res_vd = vec_round(vd);
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double>
// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double>

  res_vd = vec_perm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

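  /* vec_splat */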
  res_vd = vec_splat(vd, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vbll = vec_splat(vbll, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vsll = vec_splat(vsll, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

  res_vull = vec_splat(vull, 1);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>
// CHECK-LE: xor <16 x i8>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8>

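  /* vec_pack */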
  res_vsi = vec_pack(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vui = vec_pack(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbi = vec_pack(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

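  /* vec_vperm */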
  res_vsll = vec_vperm(vsll, vsll, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_vperm(vull, vull, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vd = vec_vperm(vd, vd, vuc);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_vsx_ld */

  res_vsi = vec_vsx_ld(0, &vsi);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vui = vec_vsx_ld(0, &vui);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vf = vec_vsx_ld(0, &vf);
// CHECK: @llvm.ppc.vsx.lxvw4x
// CHECK-LE: @llvm.ppc.vsx.lxvw4x

  res_vsll = vec_vsx_ld(0, &vsll);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vull = vec_vsx_ld(0, &vull);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  res_vd = vec_vsx_ld(0, &vd);
// CHECK: @llvm.ppc.vsx.lxvd2x
// CHECK-LE: @llvm.ppc.vsx.lxvd2x

  /* vec_vsx_st */

  vec_vsx_st(vsi, 0, &res_vsi);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vui, 0, &res_vui);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vf, 0, &res_vf);
// CHECK: @llvm.ppc.vsx.stxvw4x
// CHECK-LE: @llvm.ppc.vsx.stxvw4x

  vec_vsx_st(vsll, 0, &res_vsll);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vull, 0, &res_vull);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  vec_vsx_st(vd, 0, &res_vd);
// CHECK: @llvm.ppc.vsx.stxvd2x
// CHECK-LE: @llvm.ppc.vsx.stxvd2x

  /* vec_and */
  res_vsll = vec_and(vsll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_and(vbll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_and(vsll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vull, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vbll, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_and(vull, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_and(vbll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  /* vec_vand */
  res_vsll = vec_vand(vsll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_vand(vbll, vsll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_vand(vsll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vull, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vbll, vull);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_vand(vull, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_vand(vbll, vbll);
// CHECK: and <2 x i64>
// CHECK-LE: and <2 x i64>

  /* vec_andc */
  res_vsll = vec_andc(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_andc(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vsll = vec_andc(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vull, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vull = vec_andc(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

  res_vbll = vec_andc(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK: and <2 x i64>
// CHECK-LE: xor <2 x i64>
// CHECK-LE: and <2 x i64>

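  /* vec_floor */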
  res_vf = vec_floor(vf);
// CHECK: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.floor.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_floor(vd);
// CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{[0-9]+}})

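  /* vec_madd */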
  res_vf = vec_madd(vf, vf, vf);
// CHECK: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})

  res_vd = vec_madd(vd, vd, vd);
// CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})

  /* vec_mergeh */
  res_vsll = vec_mergeh(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergeh(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergeh(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vull, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergeh(vbll, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_mergel */
  res_vsll = vec_mergel(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergel(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergel(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vull, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergel(vbll, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  /* vec_msub */
  res_vf = vec_msub(vf, vf, vf);
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>

  res_vd = vec_msub(vd, vd, vd);
// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>

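  /* vec_mul */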
  res_vsll = vec_mul(vsll, vsll);
// CHECK: mul <2 x i64>
// CHECK-LE: mul <2 x i64>

  res_vull = vec_mul(vull, vull);
// CHECK: mul <2 x i64>
// CHECK-LE: mul <2 x i64>

  res_vf = vec_mul(vf, vf);
// CHECK: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fmul <4 x float> %{{[0-9]+}}, %{{[0-9]+}}

  res_vd = vec_mul(vd, vd);
// CHECK: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fmul <2 x double> %{{[0-9]+}}, %{{[0-9]+}}

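  /* vec_nearbyint */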
  res_vf = vec_nearbyint(vf);
// CHECK: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.round.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_nearbyint(vd);
// CHECK: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.round.v2f64(<2 x double> %{{[0-9]+}})

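  /* vec_nmadd */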
  res_vf = vec_nmadd(vf, vf, vf);
// CHECK: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]
// CHECK-LE: [[FM:[0-9]+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}})
// CHECK-LE-NEXT: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[FM]]

  res_vd = vec_nmadd(vd, vd, vd);
// CHECK: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
// CHECK-LE: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}})
// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]

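  /* vec_nmsub */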
  res_vf = vec_nmsub(vf, vf, vf);
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{[0-9]+}}, <4 x float> %{{[0-9]+}}, <4 x float>
// CHECK-LE: fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %{{[0-9]+}}

  res_vd = vec_nmsub(vd, vd, vd);
// CHECK: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]
// CHECK-LE: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %{{[0-9]+}}
// CHECK-LE-NEXT: [[FM:[0-9]+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x double>
// CHECK-LE-NEXT: fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %[[FM]]

  /* vec_nor */
  res_vsll = vec_nor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_nor(vull, vull);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_nor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK: xor <2 x i64>
// CHECK-LE: or <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vd = vec_nor(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[OR:%.+]] = or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE-NEXT: xor <2 x i64> [[OR]], <i64 -1, i64 -1>

  /* vec_or */
  res_vsll = vec_or(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_or(vbll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_or(vsll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vull, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vbll, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_or(vull, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vbll = vec_or(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vd = vec_or(vd, vd);
// CHECK: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: or <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}

  res_vd = vec_or(vbll, vd);
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[T2:%.+]] = or <2 x i64> %{{[0-9]+}}, [[T1]]
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>

  res_vd = vec_or(vd, vbll);
// CHECK: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[T2]] to <2 x double>
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> %{{[0-9]+}} to <2 x i64>
// CHECK-LE: [[T2:%.+]] = or <2 x i64> [[T1]], %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[T2]] to <2 x double>

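  /* vec_re */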
  res_vf = vec_re(vf);
// CHECK: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>

  res_vd = vec_re(vd);
// CHECK: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>

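  /* vec_rint */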
  res_vf = vec_rint(vf);
// CHECK: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_rint(vd);
// CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{[0-9]+}})

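  /* vec_rsqrte */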
  res_vf = vec_rsqrte(vf);
// CHECK: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.ppc.vsx.xvrsqrtesp(<4 x float> %{{[0-9]+}})

  res_vd = vec_rsqrte(vd);
// CHECK: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.ppc.vsx.xvrsqrtedp(<2 x double> %{{[0-9]+}})

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

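  /* vec_sel */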
  res_vd = vec_sel(vd, vd, vbll);
// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64> %{{[0-9]+}},
// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: or <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64> %{{[0-9]+}},
// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: or <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_sel(vd, vd, vull);
// CHECK: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK: and <2 x i64> %{{[0-9]+}},
// CHECK: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: or <2 x i64>
// CHECK: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>
// CHECK-LE: xor <2 x i64> %{{[0-9]+}}, <i64 -1, i64 -1>
// CHECK-LE: and <2 x i64> %{{[0-9]+}},
// CHECK-LE: and <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: or <2 x i64>
// CHECK-LE: bitcast <2 x i64> %{{[0-9]+}} to <2 x double>

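  /* vec_sqrt */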
  res_vf = vec_sqrt(vf);
// CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.sqrt.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_sqrt(vd);
// CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{[0-9]+}})

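  /* vec_sub */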
  res_vd = vec_sub(vd, vd);
// CHECK: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: fsub <2 x double> %{{[0-9]+}}, %{{[0-9]+}}

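  /* vec_trunc */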
  res_vf = vec_trunc(vf);
// CHECK: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})
// CHECK-LE: call <4 x float> @llvm.trunc.v4f32(<4 x float> %{{[0-9]+}})

  res_vd = vec_trunc(vd);
// CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})
// CHECK-LE: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{[0-9]+}})

  /* vec_vor */
  res_vsll = vec_vor(vsll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_vor(vbll, vsll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vsll = vec_vor(vsll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vull, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vbll, vull);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vull = vec_vor(vull, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  res_vbll = vec_vor(vbll, vbll);
// CHECK: or <2 x i64>
// CHECK-LE: or <2 x i64>

  /* vec_xor */
  res_vsll = vec_xor(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_xor(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_xor(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vull, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_xor(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vbll = vec_xor(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vd, vd);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vd, vbll);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  dummy();
// CHECK: call void @dummy()
// CHECK-LE: call void @dummy()

  res_vd = vec_xor(vbll, vd);
// CHECK: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK: bitcast <2 x i64> [[X1]] to <2 x double>
// CHECK-LE: [[X1:%.+]] = xor <2 x i64> %{{[0-9]+}}, %{{[0-9]+}}
// CHECK-LE: bitcast <2 x i64> [[X1]] to <2 x double>

  /* vec_vxor */
  res_vsll = vec_vxor(vsll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_vxor(vbll, vsll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vsll = vec_vxor(vsll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vull, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vbll, vull);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vull = vec_vxor(vull, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

  res_vbll = vec_vxor(vbll, vbll);
// CHECK: xor <2 x i64>
// CHECK-LE: xor <2 x i64>

}