
Searched refs:vmovl (Results 1 – 25 of 62) sorted by relevance
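For reference while reading the matches below: vmovl is the NEON widening move. It sign- or zero-extends each lane of a 64-bit D register into a 128-bit Q register whose lanes are twice as wide. A minimal sketch of the equivalent ACLE intrinsics (assumes <arm_neon.h>; the function names are illustrative):

    #include <arm_neon.h>

    /* Each function typically compiles to a single vmovl instruction. */
    int16x8_t  widen_s8(int8x8_t v)    { return vmovl_s8(v);  }  /* vmovl.s8  q, d */
    uint16x8_t widen_u8(uint8x8_t v)   { return vmovl_u8(v);  }  /* vmovl.u8  q, d */
    int32x4_t  widen_s16(int16x4_t v)  { return vmovl_s16(v); }  /* vmovl.s16 q, d */
    uint64x2_t widen_u32(uint32x2_t v) { return vmovl_u32(v); }  /* vmovl.u32 q, d */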


/external/llvm/test/CodeGen/ARM/
vcvt-cost.ll
9 ; CHECK: vmovl.s8
10 ; CHECK: vmovl.s16
11 ; CHECK: vmovl.s16
25 ; CHECK: vmovl.u8
26 ; CHECK: vmovl.u16
27 ; CHECK: vmovl.u16
55 ; CHECK: vmovl.s16
56 ; CHECK: vmovl.s16
57 ; CHECK: vmovl.s16
58 ; CHECK: vmovl.s16
[all …]
2012-08-09-neon-extload.ll
22 ; CHECK: vmovl.s8 {{q[0-9]+}}, d[[LOAD]]
23 ; CHECK: vmovl.s16 {{q[0-9]+}}, {{d[0-9]+}}
36 ; CHECK: vmovl.s8 {{q[0-9]+}}, d[[LOAD]]
37 ; CHECK: vmovl.s16 {{q[0-9]+}}, {{d[0-9]+}}
38 ; CHECK: vmovl.s32 {{q[0-9]+}}, {{d[0-9]+}}
54 ; CHECK: vmovl.s8 {{q[0-9]+}}, d[[LOAD]]
55 ; CHECK-NOT: vmovl.s16
69 ; CHECK: vmovl.s8 {{q[0-9]+}}, d[[LOAD]]
70 ; CHECK: vmovl.s16 {{q[0-9]+}}, {{d[0-9]+}}
83 ; CHECK: vmovl.s16 {{q[0-9]+}}, d[[LOAD]]
[all …]
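The vcvt-cost.ll and 2012-08-09-neon-extload.ll tests above check that a vector sign-extension spanning more than one width step is selected as a chain of widening moves (vmovl.s8 followed by vmovl.s16, plus vmovl.s32 for the 64-bit case). A hedged source-level sketch of that pattern (assumes <arm_neon.h>; the function name is illustrative):

    #include <arm_neon.h>

    /* i8 -> i32 has no single widening instruction; it lowers to
       vmovl.s8 (i8 -> i16) followed by vmovl.s16 (i16 -> i32).   */
    int32x4_t sext_i8_to_i32(int8x8_t v)
    {
        int16x8_t w16 = vmovl_s8(v);
        return vmovl_s16(vget_low_s16(w16));
    }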
big-endian-neon-extend.ll
7 ; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
8 ; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
9 ; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
22 ; CHECK-NEXT: vmovl.u16 [[QREG:q[0-9]+]], [[REG]]
23 ; CHECK-NEXT: vmovl.u32 [[QREG]], [[REG]]
37 ; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
38 ; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
52 ; CHECK-NEXT: vmovl.u16 [[QREG:q[0-9]+]], [[REG]]
66 ; CHECK-NEXT: vmovl.u8 [[QREG:q[0-9]+]], [[REG]]
67 ; CHECK-NEXT: vmovl.u16 [[QREG]], [[REG]]
[all …]
vector-extend-narrow.ll
6 ; CHECK: vmovl.u16
26 ; CHECK: vmovl.u8
27 ; CHECK: vmovl.u16
55 ; CHECK: vmovl.s8
56 ; CHECK: vmovl.s16
68 ; CHECK: vmovl.u8
69 ; CHECK: vmovl.u16
int-to-fp.ll
6 ; CHECK: vmovl.s16
14 ; CHECK: vmovl.u16
popcnt.ll
26 ; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
38 ; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
50 ; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
53 ; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
65 ; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
68 ; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
vshll.ll
86 ; And these have a shift just out of range so separate vmovl and vshl
90 ; CHECK: vmovl.u8
100 ; CHECK: vmovl.s16
110 ; CHECK: vmovl.u32
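The vshll.ll comment above concerns widening shifts whose shift amount is just beyond what a single vshll can encode (the immediate may not exceed the source lane width); those are emitted as a widening move plus an ordinary shift. A hedged sketch of the same shape in intrinsics (assumes <arm_neon.h>; the function name is illustrative):

    #include <arm_neon.h>

    /* vshll_n_u8 only accepts shifts up to 8, so a 9-bit widening shift
       has to be written as vmovl.u8 followed by vshl.i16 #9.            */
    uint16x8_t widen_shift9(uint8x8_t v)
    {
        return vshlq_n_u16(vmovl_u8(v), 9);
    }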
vmov.ll
193 ;CHECK: vmovl.s8
201 ;CHECK: vmovl.s16
209 ;CHECK: vmovl.s32
217 ;CHECK: vmovl.u8
225 ;CHECK: vmovl.u16
233 ;CHECK: vmovl.u32
385 ; Vector any_extends must be selected as either vmovl.u or vmovl.s.
390 ;CHECK: vmovl
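vmov.ll line 385 above makes the related point for any-extends: when nothing observes the upper half of each widened lane, the selector may use either the signed or the unsigned widening move, so the test only checks for a bare "vmovl". A hedged illustration of the two forms (assumes <arm_neon.h>; names are illustrative):

    #include <arm_neon.h>

    /* The low byte of every widened lane is the same whichever extension
       is used; only the upper byte differs.  When no later use looks at
       that upper byte, the DAG models the extend as any_extend and either
       vmovl.u8 or vmovl.s8 is a valid selection.                          */
    uint16x8_t zext8(uint8x8_t v) { return vmovl_u8(v); }                      /* vmovl.u8 */
    int16x8_t  sext8(uint8x8_t v) { return vmovl_s8(vreinterpret_s8_u8(v)); }  /* vmovl.s8 */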
/external/libhevc/common/arm/
ihevc_inter_pred_chroma_copy_w16out.s
134 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
142 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
145 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
150 vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp)
175 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
183 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
186 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
212 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
213 vmovl.u8 q9,d10 @vmovl_u8(vld1_u8(pu1_src_tmp)
214 vmovl.u8 q10,d12 @vmovl_u8(vld1_u8(pu1_src_tmp)
[all …]
ihevc_inter_pred_luma_copy_w16out.s
104 vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
112 vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
115 vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
120 vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp)
152 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
153 vmovl.u8 q9,d10 @vmovl_u8(vld1_u8(pu1_src_tmp)
154 vmovl.u8 q10,d12 @vmovl_u8(vld1_u8(pu1_src_tmp)
155 vmovl.u8 q11,d14 @vmovl_u8(vld1_u8(pu1_src_tmp)
182 vmovl.u8 q8,d8 @vmovl_u8(vld1_u8(pu1_src_tmp))
185 vmovl.u8 q9,d10 @vmovl_u8(vld1_u8(pu1_src_tmp)
[all …]
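The two copy_w16out kernels above annotate each vmovl.u8 with its intrinsic equivalent, vmovl_u8(vld1_u8(pu1_src_tmp)): source pixels are loaded as 8-bit and widened to 16-bit on the way to the 16-bit destination buffer. A rough sketch of one row (hypothetical function and parameter names, assumes <arm_neon.h>; the real kernels also shift the widened values, which is omitted here):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Load 8 pixels, widen u8 -> 16-bit (vmovl.u8), store as int16. */
    static void copy_row_w16(const uint8_t *src, int16_t *dst)
    {
        uint8x8_t pix  = vld1_u8(src);
        int16x8_t wide = vreinterpretq_s16_u16(vmovl_u8(pix));
        vst1q_s16(dst, wide);
    }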
ihevc_deblk_chroma_horz.s
64 vmovl.u8 q0,d0
74 vmovl.u8 q1,d2
85 vmovl.u8 q2,d4
97 vmovl.u8 q8,d16
ihevc_deblk_luma_vert.s
468 vmovl.s8 q8,d20
469 vmovl.u8 q9,d2
473 vmovl.u8 q9,d4
508 vmovl.u8 q9,d3
509 vmovl.s8 q8,d16
565 vmovl.u8 q8,d5
566 vmovl.s8 q1,d3
/external/llvm/test/MC/ARM/
neont2-mov-encoding.s
70 vmovl.s8 q8, d16
71 vmovl.s16 q8, d16
72 vmovl.s32 q8, d16
73 vmovl.u8 q8, d16
74 vmovl.u16 q8, d16
75 vmovl.u32 q8, d16
89 @ CHECK: vmovl.s8 q8, d16 @ encoding: [0xc8,0xef,0x30,0x0a]
90 @ CHECK: vmovl.s16 q8, d16 @ encoding: [0xd0,0xef,0x30,0x0a]
91 @ CHECK: vmovl.s32 q8, d16 @ encoding: [0xe0,0xef,0x30,0x0a]
92 @ CHECK: vmovl.u8 q8, d16 @ encoding: [0xc8,0xff,0x30,0x0a]
[all …]
neon-mov-encoding.s
67 vmovl.s8 q8, d16
68 vmovl.s16 q8, d16
69 vmovl.s32 q8, d16
70 vmovl.u8 q8, d16
71 vmovl.u16 q8, d16
72 vmovl.u32 q8, d16
74 @ CHECK: vmovl.s8 q8, d16 @ encoding: [0x30,0x0a,0xc8,0xf2]
75 @ CHECK: vmovl.s16 q8, d16 @ encoding: [0x30,0x0a,0xd0,0xf2]
76 @ CHECK: vmovl.s32 q8, d16 @ encoding: [0x30,0x0a,0xe0,0xf2]
77 @ CHECK: vmovl.u8 q8, d16 @ encoding: [0x30,0x0a,0xc8,0xf3]
[all …]
/external/libavc/common/arm/
ih264_weighted_bi_pred_a9q.s
174 vmovl.u8 q2, d4 @converting rows 1,2 in source 1 to 16-bit
177 vmovl.u8 q3, d6 @converting rows 1,2 in source 2 to 16-bit
181 vmovl.u8 q4, d8 @converting rows 3,4 in source 1 to 16-bit
182 vmovl.u8 q5, d10 @converting rows 3,4 in source 2 to 16-bit
214 vmovl.u8 q2, d4 @converting row 1 in source 1 to 16-bit
217 vmovl.u8 q3, d6 @converting row 1 in source 2 to 16-bit
221 vmovl.u8 q4, d8 @converting row 2 in source 1 to 16-bit
222 vmovl.u8 q5, d10 @converting row 2 in source 2 to 16-bit
226 vmovl.u8 q6, d12 @converting row 3 in source 1 to 16-bit
227 vmovl.u8 q7, d14 @converting row 3 in source 2 to 16-bit
[all …]
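In the bi-weighted prediction kernel above, both reference blocks are widened from 8 to 16 bits before they are combined, so the arithmetic has headroom. A simplified sketch of that widen/combine/narrow shape (hypothetical names, assumes <arm_neon.h>; the real kernel applies per-source weights and a rounding offset rather than a plain average):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Widen both sources (vmovl.u8), combine in 16-bit, then narrow back
       to pixels with unsigned saturation.                                */
    static uint8x8_t bipred_row(const uint8_t *src1, const uint8_t *src2)
    {
        int16x8_t a   = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src1)));
        int16x8_t b   = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src2)));
        int16x8_t sum = vaddq_s16(a, b);
        return vqmovun_s16(vrshrq_n_s16(sum, 1));   /* (a + b + 1) >> 1 */
    }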
ih264_weighted_pred_a9q.s
141 vmovl.u8 q2, d4 @converting rows 1,2 to 16-bit
142 vmovl.u8 q3, d6 @converting rows 3,4 to 16-bit
171 vmovl.u8 q2, d4 @converting row 1 to 16-bit
173 vmovl.u8 q3, d6 @converting row 2 to 16-bit
175 vmovl.u8 q4, d8 @converting row 3 to 16-bit
177 vmovl.u8 q5, d10 @converting row 4 to 16-bit
210 vmovl.u8 q6, d4 @converting row 1L to 16-bit
212 vmovl.u8 q7, d5 @converting row 1H to 16-bit
215 vmovl.u8 q8, d6 @converting row 2L to 16-bit
217 vmovl.u8 q9, d7 @converting row 2H to 16-bit
[all …]
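The single-source weighted kernel has the same shape: widen each row with vmovl.u8, scale by the weight in 16-bit arithmetic, round, add the offset, and saturate back to 8-bit. A hedged sketch with illustrative weight, offset, and shift values (assumes <arm_neon.h>; not the exact ih264 formula):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Widen (vmovl.u8) so the weight multiply happens in 16 bits, then
       narrow back to pixels with unsigned saturation.  A small weight is
       assumed so the product fits in int16.                              */
    static uint8x8_t weight_row(const uint8_t *src, int16_t wt, int16_t ofst)
    {
        int16x8_t p = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src)));
        int16x8_t w = vmulq_n_s16(p, wt);
        w = vrshrq_n_s16(w, 5);              /* illustrative log2(denominator) */
        w = vaddq_s16(w, vdupq_n_s16(ofst));
        return vqmovun_s16(w);
    }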
ih264_intra_pred_chroma_a9q.s
122 vmovl.u8 q1, d3
123 vmovl.u8 q0, d0
146 vmovl.u8 q1, d0
147 vmovl.u8 q2, d1
159 vmovl.u8 q1, d0
160 vmovl.u8 q2, d1
/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve8_neon_asm.asm
87 vmovl.u8 q8, d24
88 vmovl.u8 q9, d25
89 vmovl.u8 q10, d26
90 vmovl.u8 q11, d27
117 vmovl.u8 q12, d28
118 vmovl.u8 q13, d29
200 vmovl.u8 q8, d16
201 vmovl.u8 q9, d18
202 vmovl.u8 q10, d20
203 vmovl.u8 q11, d22
[all …]
vpx_convolve8_avg_neon_asm.asm
87 vmovl.u8 q8, d24
88 vmovl.u8 q9, d25
89 vmovl.u8 q10, d26
90 vmovl.u8 q11, d27
117 vmovl.u8 q12, d28
118 vmovl.u8 q13, d29
211 vmovl.u8 q8, d16
212 vmovl.u8 q9, d18
213 vmovl.u8 q10, d20
214 vmovl.u8 q11, d22
[all …]
intrapred_neon_asm.asm
313 vmovl.u8 q1, d2
314 vmovl.u8 q2, d4
325 vmovl.u8 q1, d2
326 vmovl.u8 q2, d4
355 vmovl.u8 q10, d30
431 vmovl.u8 q10, d18
491 vmovl.u8 q10, d18
527 vmovl.u8 q3, d26
621 vmovl.u8 q3, d0
/external/llvm/test/CodeGen/AArch64/
arm64-neon-3vdiff.ll
57 %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
58 %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
59 %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
67 %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
68 %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
69 %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
77 %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
78 %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
79 %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
87 %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
[all …]
arm64-AnInfiniteLoopInDAGCombine.ll
21 %vmovl.i.i = ashr <4 x i32> %sext, <i32 16, i32 16, i32 16, i32 16>
22 ret <4 x i32> %vmovl.i.i
fold-constants.ll
31 %vmovl.i4.i = sext <8 x i1> %cmp.i603 to <8 x i16>
32 ret <8 x i16> %vmovl.i4.i
arm64-vadd.ll
166 %vmovl.i.i.i = sext <8 x i8> %tmp1 to <8 x i16>
170 %vmovl.i.i5.i = sext <8 x i8> %tmp3 to <8 x i16>
171 %add.i = add <8 x i16> %vmovl.i.i.i, %vmovl.i.i5.i
182 %vmovl.i.i.i = sext <4 x i16> %tmp1 to <4 x i32>
186 %vmovl.i.i5.i = sext <4 x i16> %tmp3 to <4 x i32>
187 %add.i = add <4 x i32> %vmovl.i.i.i, %vmovl.i.i5.i
198 %vmovl.i.i.i = sext <2 x i32> %tmp1 to <2 x i64>
202 %vmovl.i.i5.i = sext <2 x i32> %tmp3 to <2 x i64>
203 %add.i = add <2 x i64> %vmovl.i.i.i, %vmovl.i.i5.i
248 %vmovl.i.i.i = zext <8 x i8> %tmp1 to <8 x i16>
[all …]
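In the AArch64 tests above, the %vmovl.* values are the IR form of the widening step: the long-add intrinsics are expressed as sext/zext of both operands followed by a wide add, and the backend folds that pattern back into a single long-add instruction. The corresponding source-level intrinsic (assumes <arm_neon.h>):

    #include <arm_neon.h>

    /* vaddl_s8 appears in IR as two sext <8 x i8> to <8 x i16> plus an add,
       i.e. the %vmovl.i.i / %add.i pattern in arm64-vadd.ll; it is selected
       to saddl on AArch64 (vaddl.s8 on 32-bit ARM).                         */
    int16x8_t long_add(int8x8_t a, int8x8_t b)
    {
        return vaddl_s8(a, b);
    }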
/external/libmpeg2/common/arm/
impeg2_idct.s
228 vmovl.s16 q4, d2
229 vmovl.s16 q5, d3
238 vmovl.s16 q4, d2
239 vmovl.s16 q5, d3
248 vmovl.s16 q4, d2
249 vmovl.s16 q5, d3
258 vmovl.s16 q4, d2
259 vmovl.s16 q5, d3
268 vmovl.s16 q4, d2
269 vmovl.s16 q5, d3
[all …]
