1; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
2
3;;; Test vector shift right arithmetic intrinsic instructions
4;;;
5;;; Note:
6;;;   We test VSRA*vvl, VSRA*vvl_v, VSRA*vrl, VSRA*vrl_v, VSRA*vil, VSRA*vil_v,
7;;;   VSRA*vvml_v, VSRA*vrml_v, VSRA*viml_v, PVSRA*vvl, PVSRA*vvl_v, PVSRA*vrl,
8;;;   PVSRA*vrl_v, PVSRA*vvml_v, and PVSRA*vrml_v instructions.
9
10; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsrawsx_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.sx %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Vector shift amount, full VL=256, no pass-through: expects a single VSRA.W.SX.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}
21
22; Function Attrs: nounwind readnone
23declare <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double>, <256 x double>, i32)
24
25; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vsrawsx_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.sx %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Pass-through (_v) form: shift at VL=128 into the pass-through register,
; then copy back to %v0 with a full-width VOR at VL=256.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
39
40; Function Attrs: nounwind readnone
41declare <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
42
43; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vsrawsx_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.w.sx %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
; Scalar i32 shift amount: low 32 bits isolated via `and (32)0` before VSRA.W.SX.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %0, i32 %1, i32 256)
  ret <256 x double> %3
}
55
56; Function Attrs: nounwind readnone
57declare <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double>, i32, i32)
58
59; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vsrawsx_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.w.sx %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Scalar shift amount with pass-through: VL=128 op, then full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
74
75; Function Attrs: nounwind readnone
76declare <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double>, i32, <256 x double>, i32)
77
78; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vsrawsx_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.sx %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
; Constant shift amount 8 is selected as an immediate operand of VSRA.W.SX.
  %2 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %0, i32 8, i32 256)
  ret <256 x double> %2
}
89
90; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsrawsx_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.sx %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Immediate shift with pass-through: VL=128 op, then full-width VOR copy-back.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}
104
105; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsrawsx_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.sx %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Masked (_m) vector form: the <256 x i1> mask argument lands in %vm1.
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
119
120; Function Attrs: nounwind readnone
121declare <256 x double> @llvm.ve.vl.vsrawsx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
122
123; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsrawsx_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.w.sx %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Masked scalar form: 32-bit masking of the shift amount plus %vm1 mask operand.
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
138
139; Function Attrs: nounwind readnone
140declare <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
141
142; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawsx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vsrawsx_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.sx %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Masked form with immediate shift amount 8.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
156
157; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsrawzx_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.zx %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Zero-extend (.zx) variant: vector shift amount, full VL=256, no pass-through.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}
168
169; Function Attrs: nounwind readnone
170declare <256 x double> @llvm.ve.vl.vsrawzx.vvvl(<256 x double>, <256 x double>, i32)
171
172; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vsrawzx_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.zx %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; .zx pass-through form: VL=128 op into pass-through, full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
186
187; Function Attrs: nounwind readnone
188declare <256 x double> @llvm.ve.vl.vsrawzx.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
189
190; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvsl(<256 x double> %0, i32 signext %1) {
; CHECK-LABEL: vsrawzx_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.w.zx %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
; .zx scalar form: low 32 bits of the shift amount isolated via `and (32)0`.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double> %0, i32 %1, i32 256)
  ret <256 x double> %3
}
202
203; Function Attrs: nounwind readnone
204declare <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double>, i32, i32)
205
206; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvsvl(<256 x double> %0, i32 signext %1, <256 x double> %2) {
; CHECK-LABEL: vsrawzx_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.w.zx %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; .zx scalar form with pass-through: VL=128 op, then full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double> %0, i32 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
221
222; Function Attrs: nounwind readnone
223declare <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double>, i32, <256 x double>, i32)
224
225; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vsrawzx_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.zx %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
; Constant shift amount 8 selected as an immediate operand of VSRA.W.ZX.
  %2 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double> %0, i32 8, i32 256)
  ret <256 x double> %2
}
236
237; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsrawzx_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.zx %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; .zx immediate shift with pass-through: VL=128 op, full-width VOR copy-back.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double> %0, i32 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}
251
252; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsrawzx_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.zx %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Masked .zx vector form: the <256 x i1> mask argument lands in %vm1.
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
266
267; Function Attrs: nounwind readnone
268declare <256 x double> @llvm.ve.vl.vsrawzx.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
269
270; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvsmvl(<256 x double> %0, i32 signext %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsrawzx_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.w.zx %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Masked .zx scalar form: 32-bit masking of the shift amount plus %vm1 operand.
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double> %0, i32 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
285
286; Function Attrs: nounwind readnone
287declare <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double>, i32, <256 x i1>, <256 x double>, i32)
288
289; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsrawzx_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vsrawzx_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.w.zx %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Masked .zx form with immediate shift amount 8.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double> %0, i32 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
303
304; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsral_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.l %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; 64-bit (long) variant: vector shift amount, full VL=256, no pass-through.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}
315
316; Function Attrs: nounwind readnone
317declare <256 x double> @llvm.ve.vl.vsral.vvvl(<256 x double>, <256 x double>, i32)
318
319; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: vsral_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.l %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Long pass-through form: VL=128 op into pass-through, full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
333
334; Function Attrs: nounwind readnone
335declare <256 x double> @llvm.ve.vl.vsral.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
336
337; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vsral_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.l %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
; i64 scalar shift amount used directly — no 32-bit masking `and` as in the .w forms.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double> %0, i64 %1, i32 256)
  ret <256 x double> %3
}
348
349; Function Attrs: nounwind readnone
350declare <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double>, i64, i32)
351
352; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vsral_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.l %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Long scalar form with pass-through: VL=128 op, full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
366
367; Function Attrs: nounwind readnone
368declare <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double>, i64, <256 x double>, i32)
369
370; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvsl_imm(<256 x double> %0) {
; CHECK-LABEL: vsral_vvsl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.l %v0, %v0, 8
; CHECK-NEXT:    b.l.t (, %s10)
; Constant shift amount 8 selected as an immediate operand of VSRA.L.
  %2 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double> %0, i64 8, i32 256)
  ret <256 x double> %2
}
381
382; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvsvl_imm(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: vsral_vvsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.l %v1, %v0, 8
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Long immediate shift with pass-through: VL=128 op, full-width VOR copy-back.
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double> %0, i64 8, <256 x double> %1, i32 128)
  ret <256 x double> %3
}
396
397; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsral_vvvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.l %v2, %v0, %v1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Masked long vector form: the <256 x i1> mask argument lands in %vm1.
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
411
412; Function Attrs: nounwind readnone
413declare <256 x double> @llvm.ve.vl.vsral.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
414
415; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsral_vvsmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsra.l %v1, %v0, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Masked long scalar form: i64 shift amount used directly plus %vm1 mask operand.
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
429
430; Function Attrs: nounwind readnone
431declare <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double>, i64, <256 x i1>, <256 x double>, i32)
432
433; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsral_vvsmvl_imm(<256 x double> %0, <256 x i1> %1, <256 x double> %2) {
; CHECK-LABEL: vsral_vvsmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vsra.l %v1, %v0, 8, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Masked long form with immediate shift amount 8.
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double> %0, i64 8, <256 x i1> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
447
448; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsra_vvvl(<256 x double> %0, <256 x double> %1) {
; CHECK-LABEL: pvsra_vvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 256
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsra %v0, %v0, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Packed (PVSRA) variant: vector shift amount, full VL=256, no pass-through.
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
  ret <256 x double> %3
}
459
460; Function Attrs: nounwind readnone
461declare <256 x double> @llvm.ve.vl.pvsra.vvvl(<256 x double>, <256 x double>, i32)
462
463; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsra_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
; CHECK-LABEL: pvsra_vvvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsra %v2, %v0, %v1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Packed pass-through form: VL=128 op into pass-through, full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
477
478; Function Attrs: nounwind readnone
479declare <256 x double> @llvm.ve.vl.pvsra.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
480
481; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsra_vvsl(<256 x double> %0, i64 %1) {
; CHECK-LABEL: pvsra_vvsl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsra %v0, %v0, %s0
; CHECK-NEXT:    b.l.t (, %s10)
; Packed scalar form: i64 shift amount used directly, full VL=256.
  %3 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvsl(<256 x double> %0, i64 %1, i32 256)
  ret <256 x double> %3
}
492
493; Function Attrs: nounwind readnone
494declare <256 x double> @llvm.ve.vl.pvsra.vvsl(<256 x double>, i64, i32)
495
496; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsra_vvsvl(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: pvsra_vvsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsra %v1, %v0, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Packed scalar form with pass-through: VL=128 op, full-width VOR copy-back.
  %4 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvsvl(<256 x double> %0, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}
510
511; Function Attrs: nounwind readnone
512declare <256 x double> @llvm.ve.vl.pvsra.vvsvl(<256 x double>, i64, <256 x double>, i32)
513
514; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsra_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvsra_vvvMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s0, 128
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvsra %v2, %v0, %v1, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v2
; CHECK-NEXT:    b.l.t (, %s10)
; Packed masked (_M) form: the <512 x i1> mask argument lands in %vm2.
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
528
529; Function Attrs: nounwind readnone
530declare <256 x double> @llvm.ve.vl.pvsra.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
531
532; Function Attrs: nounwind readnone
define fastcc <256 x double> @pvsra_vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: pvsra_vvsMvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvsra %v1, %v0, %s0, %vm2
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
; Packed masked scalar form: i64 shift amount plus <512 x i1> mask in %vm2.
  %5 = tail call fast <256 x double> @llvm.ve.vl.pvsra.vvsMvl(<256 x double> %0, i64 %1, <512 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}
546
547; Function Attrs: nounwind readnone
548declare <256 x double> @llvm.ve.vl.pvsra.vvsMvl(<256 x double>, i64, <512 x i1>, <256 x double>, i32)
549