
Searched for refs:vmull (results 1 – 25 of 122), sorted by relevance

/external/libhevc/common/arm/
ihevc_itrans_recon_8x8.s
193 vmull.s16 q10,d2,d0[0] @// y0 * cos4(part of c0 and c1)
195 vmull.s16 q9,d3,d1[2] @// y2 * sin2 (q3 is freed by this time)(part of d1)
198 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
200 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
202 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2)
204 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3)
214 vmull.s16 q11,d10,d0[0] @// y4 * cos4(part of c0 and c1)
216 vmull.s16 q3,d3,d0[2] @// y2 * cos2(part of d0)
308 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
309 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
[all …]
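The comments in the listing above describe partial butterfly products of the inverse transform (y0 * cos4, y1 * cos1, y2 * sin2, and so on), each produced by a vmull.s16 against a single coefficient lane. Below is a minimal C sketch of that multiply-by-lane pattern using NEON intrinsics; the function and variable names are illustrative and not taken from libhevc.

```c
#include <arm_neon.h>

/* One partial product of the inverse-transform butterfly, e.g. y1 * cos1.
 * vmull_lane_s16 widens four signed 16-bit samples against one 16-bit
 * coefficient held in a lane of `coeffs`, yielding 32-bit accumulators.
 * This corresponds to an instruction such as "vmull.s16 q12, d6, d0[1]". */
static inline int32x4_t partial_product(int16x4_t samples, int16x4_t coeffs)
{
    return vmull_lane_s16(samples, coeffs, 1); /* lane 1 plays the role of d0[1] */
}
```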
ihevc_intra_pred_luma_mode_3_to_9.s
161 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
213 vmull.u8 q12, d12, d7 @mul (row 0)
223 vmull.u8 q11, d16, d7 @mul (row 1)
234 vmull.u8 q10, d14, d7 @mul (row 2)
245 vmull.u8 q9, d10, d7 @mul (row 3)
256 vmull.u8 q12, d12, d7 @mul (row 4)
267 vmull.u8 q11, d16, d7 @mul (row 5)
278 vmull.u8 q10, d14, d7 @mul (row 6)
282 vmull.u8 q9, d10, d7 @mul (row 7)
311 vmull.s8 q6, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
[all …]
ihevc_inter_pred_chroma_vert_w16out.s
150 vmull.u8 q3,d9,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
155 vmull.u8 q2,d4,d1
193 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
237 vmull.u8 q15,d5,d1 @mul with coeff 1
245 vmull.u8 q14,d6,d1 @mul_res 2
254 vmull.u8 q13,d7,d1
263 vmull.u8 q12,d8,d1
280 vmull.u8 q15,d5,d1 @mul with coeff 1
291 vmull.u8 q14,d6,d1 @mul_res 2
302 vmull.u8 q13,d7,d1
[all …]
ihevc_itrans_recon_16x16.s
243 vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
244 vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
245 vmull.s16 q14,d6,d1[1] @// y1 * sin3(part of b2)
246 vmull.s16 q15,d6,d1[3] @// y1 * sin1(part of b3)
258 vmull.s16 q6,d10,d0[0]
260 vmull.s16 q7,d10,d0[0]
262 vmull.s16 q8,d10,d0[0]
264 vmull.s16 q9,d10,d0[0]
420 vmull.s16 q12,d6,d2[1] @// y1 * cos1(part of b0)
421 vmull.s16 q13,d6,d2[3] @// y1 * cos3(part of b1)
[all …]
ihevc_inter_pred_chroma_vert_w16inp_w16out.s
145 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0)
148 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0)
192 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
200 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
211 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
221 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
236 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
246 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
257 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
269 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
[all …]
ihevc_intra_pred_filters_luma_mode_11_to_17.s
269 vmull.s8 q11, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
323 vmull.u8 q12, d12, d7 @mul (row 0)
333 vmull.u8 q11, d16, d7 @mul (row 1)
344 vmull.u8 q10, d14, d7 @mul (row 2)
355 vmull.u8 q9, d10, d7 @mul (row 3)
366 vmull.u8 q12, d12, d7 @mul (row 4)
377 vmull.u8 q11, d16, d7 @mul (row 5)
388 vmull.u8 q10, d14, d7 @mul (row 6)
392 vmull.u8 q9, d10, d7 @mul (row 7)
421 vmull.s8 q6, d30, d31 @(col+1)*intra_pred_angle [0:7](col)
[all …]
ihevc_intra_pred_chroma_mode_27_to_33.s
150 vmull.u8 q1,d3,d0 @pos = ((row + 1) * intra_pred_ang)
179 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
190 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
205 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
222 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
240 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
256 vmull.u8 q7,d12,d28 @(vi)vmull_u8(ref_main_idx, dup_const_32_fract)
269 vmull.u8 q9,d16,d26 @(vii)vmull_u8(ref_main_idx, dup_const_32_fract)
281 vmull.u8 q1,d5,d0 @pos = ((row + 1) * intra_pred_ang)
301 vmull.u8 q11,d20,d24 @(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
[all …]
ihevc_intra_pred_luma_mode_27_to_33.s
153 vmull.u8 q1,d3,d0 @pos = ((row + 1) * intra_pred_ang)
181 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
192 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
207 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
223 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
241 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
257 vmull.u8 q7,d12,d28 @(vi)vmull_u8(ref_main_idx, dup_const_32_fract)
270 vmull.u8 q9,d16,d26 @(vii)vmull_u8(ref_main_idx, dup_const_32_fract)
282 vmull.u8 q1,d5,d0 @pos = ((row + 1) * intra_pred_ang)
301 vmull.u8 q11,d20,d24 @(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
[all …]
ihevc_inter_pred_chroma_vert_w16inp.s
145 vmull.s16 q0,d0,d12 @vmull_s16(src_tmp1, coeff_0)
148 vmull.s16 q4,d2,d12 @vmull_s16(src_tmp2, coeff_0)
193 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
201 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
212 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
223 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
239 vmull.s16 q15,d0,d12 @vmull_s16(src_tmp1, coeff_0)
250 vmull.s16 q14,d1,d12 @vmull_s16(src_tmp2, coeff_0)
262 vmull.s16 q13,d2,d12 @vmull_s16(src_tmp2, coeff_0)
275 vmull.s16 q12,d3,d12 @vmull_s16(src_tmp2, coeff_0)
[all …]
ihevc_intra_pred_filters_luma_mode_19_to_25.s
265 vmull.s8 q1,d3,d0 @pos = ((row + 1) * intra_pred_ang)
291 vmull.u8 q5,d8,d30 @(i row)vmull_u8(ref_main_idx, dup_const_32_fract)
301 vmull.u8 q7,d12,d28 @(ii)vmull_u8(ref_main_idx, dup_const_32_fract)
316 vmull.u8 q9,d16,d26 @(iii)vmull_u8(ref_main_idx, dup_const_32_fract)
331 vmull.u8 q11,d20,d24 @(iv)vmull_u8(ref_main_idx, dup_const_32_fract)
348 vmull.u8 q5,d8,d30 @(v)vmull_u8(ref_main_idx, dup_const_32_fract)
363 vmull.u8 q7,d12,d28 @(vi)vmull_u8(ref_main_idx, dup_const_32_fract)
376 vmull.u8 q9,d16,d26 @(vii)vmull_u8(ref_main_idx, dup_const_32_fract)
388 vmull.s8 q1,d5,d0 @pos = ((row + 1) * intra_pred_ang)
408 vmull.u8 q11,d20,d24 @(viii)vmull_u8(ref_main_idx, dup_const_32_fract)
[all …]
ihevc_inter_pred_chroma_vert.s
149 vmull.u8 q3,d9,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
154 vmull.u8 q2,d4,d1
194 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
238 vmull.u8 q15,d5,d1 @mul with coeff 1
246 vmull.u8 q14,d6,d1 @mul_res 2
256 vmull.u8 q13,d7,d1
266 vmull.u8 q12,d8,d1
285 vmull.u8 q15,d5,d1 @mul with coeff 1
297 vmull.u8 q14,d6,d1 @mul_res 2
310 vmull.u8 q13,d7,d1
[all …]
/external/llvm/test/CodeGen/AArch64/
arm64-scvt.ll
81 %vmull.i = fmul float %val, %val
82 ret float %vmull.i
94 %vmull.i = fmul float %val, %val
95 ret float %vmull.i
107 %vmull.i = fmul float %val, %val
108 ret float %vmull.i
121 %vmull.i = fmul float %val, %val
122 ret float %vmull.i
135 %vmull.i = fmul float %val, %val
136 ret float %vmull.i
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
arm64-scvt.ll
81 %vmull.i = fmul float %val, %val
82 ret float %vmull.i
94 %vmull.i = fmul float %val, %val
95 ret float %vmull.i
107 %vmull.i = fmul float %val, %val
108 ret float %vmull.i
121 %vmull.i = fmul float %val, %val
122 ret float %vmull.i
135 %vmull.i = fmul float %val, %val
136 ret float %vmull.i
[all …]
/external/swiftshader/third_party/LLVM/test/MC/ARM/
neont2-mul-encoding.s
41 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xc0,0xef,0xa1,0x0c]
42 vmull.s8 q8, d16, d17
43 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xd0,0xef,0xa1,0x0c]
44 vmull.s16 q8, d16, d17
45 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xe0,0xef,0xa1,0x0c]
46 vmull.s32 q8, d16, d17
47 @ CHECK: vmull.u8 q8, d16, d17 @ encoding: [0xc0,0xff,0xa1,0x0c]
48 vmull.u8 q8, d16, d17
49 @ CHECK: vmull.u16 q8, d16, d17 @ encoding: [0xd0,0xff,0xa1,0x0c]
50 vmull.u16 q8, d16, d17
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/
neont2-mul-encoding.s
54 vmull.s8 q8, d16, d17
55 vmull.s16 q8, d16, d17
56 vmull.s32 q8, d16, d17
57 vmull.u8 q8, d16, d17
58 vmull.u16 q8, d16, d17
59 vmull.u32 q8, d16, d17
60 vmull.p8 q8, d16, d17
62 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xc0,0xef,0xa1,0x0c]
63 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xd0,0xef,0xa1,0x0c]
64 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xe0,0xef,0xa1,0x0c]
[all …]
neon-mul-encoding.s
82 vmull.s8 q8, d16, d17
83 vmull.s16 q8, d16, d17
84 vmull.s32 q8, d16, d17
85 vmull.u8 q8, d16, d17
86 vmull.u16 q8, d16, d17
87 vmull.u32 q8, d16, d17
88 vmull.p8 q8, d16, d17
90 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf2]
91 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf2]
92 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf2]
[all …]
/external/llvm/test/MC/ARM/
neont2-mul-encoding.s
54 vmull.s8 q8, d16, d17
55 vmull.s16 q8, d16, d17
56 vmull.s32 q8, d16, d17
57 vmull.u8 q8, d16, d17
58 vmull.u16 q8, d16, d17
59 vmull.u32 q8, d16, d17
60 vmull.p8 q8, d16, d17
62 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xc0,0xef,0xa1,0x0c]
63 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xd0,0xef,0xa1,0x0c]
64 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xe0,0xef,0xa1,0x0c]
[all …]
neon-mul-encoding.s
82 vmull.s8 q8, d16, d17
83 vmull.s16 q8, d16, d17
84 vmull.s32 q8, d16, d17
85 vmull.u8 q8, d16, d17
86 vmull.u16 q8, d16, d17
87 vmull.u32 q8, d16, d17
88 vmull.p8 q8, d16, d17
90 @ CHECK: vmull.s8 q8, d16, d17 @ encoding: [0xa1,0x0c,0xc0,0xf2]
91 @ CHECK: vmull.s16 q8, d16, d17 @ encoding: [0xa1,0x0c,0xd0,0xf2]
92 @ CHECK: vmull.s32 q8, d16, d17 @ encoding: [0xa1,0x0c,0xe0,0xf2]
[all …]
/external/libmpeg2/common/arm/
impeg2_idct.s
448 vmull.s16 q10, d2, d0[0] @// y0 * cos4(part of c0 and c1)
450 vmull.s16 q9, d3, d1[2] @// y2 * sin2 (Q3 is freed by this time)(part of d1)
453 vmull.s16 q12, d6, d0[1] @// y1 * cos1(part of b0)
455 vmull.s16 q13, d6, d0[3] @// y1 * cos3(part of b1)
457 vmull.s16 q14, d6, d1[1] @// y1 * sin3(part of b2)
459 vmull.s16 q15, d6, d1[3] @// y1 * sin1(part of b3)
469 vmull.s16 q11, d10, d0[0] @// y4 * cos4(part of c0 and c1)
471 vmull.s16 q3, d3, d0[2] @// y2 * cos2(part of d0)
567 vmull.s16 q12, d6, d0[1] @// y1 * cos1(part of b0)
568 vmull.s16 q13, d6, d0[3] @// y1 * cos3(part of b1)
[all …]
/external/capstone/suite/MC/ARM/
neont2-mul-encoding.s.cs
22 0xc0,0xef,0xa1,0x0c = vmull.s8 q8, d16, d17
23 0xd0,0xef,0xa1,0x0c = vmull.s16 q8, d16, d17
24 0xe0,0xef,0xa1,0x0c = vmull.s32 q8, d16, d17
25 0xc0,0xff,0xa1,0x0c = vmull.u8 q8, d16, d17
26 0xd0,0xff,0xa1,0x0c = vmull.u16 q8, d16, d17
27 0xe0,0xff,0xa1,0x0c = vmull.u32 q8, d16, d17
28 0xc0,0xef,0xa1,0x0e = vmull.p8 q8, d16, d17
neon-mul-encoding.s.cs
36 0xa1,0x0c,0xc0,0xf2 = vmull.s8 q8, d16, d17
37 0xa1,0x0c,0xd0,0xf2 = vmull.s16 q8, d16, d17
38 0xa1,0x0c,0xe0,0xf2 = vmull.s32 q8, d16, d17
39 0xa1,0x0c,0xc0,0xf3 = vmull.u8 q8, d16, d17
40 0xa1,0x0c,0xd0,0xf3 = vmull.u16 q8, d16, d17
41 0xa1,0x0c,0xe0,0xf3 = vmull.u32 q8, d16, d17
42 0xa1,0x0e,0xc0,0xf2 = vmull.p8 q8, d16, d17
/external/llvm/test/CodeGen/ARM/
2012-08-23-legalize-vmull.ll
4 ; Test generataion of code for vmull instruction when multiplying 128-bit
7 ; The vmull operation requires 64-bit vectors, so we must extend the original
8 ; vector size to 64 bits for vmull operation.
10 ; for vmull.
20 ;CHECK: vmull
33 ;CHECK: vmull
46 ;CHECK: vmull
64 ;CHECK: vmull
80 ;CHECK: vmull
96 ;CHECK: vmull
[all …]
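The test comments above note that vmull operates on 64-bit vectors and widens into a 128-bit result, so narrower inputs must first be extended before the instruction can be selected. As a rough illustration (not taken from the test file, names illustrative), the C intrinsics below each map onto a single vmull instruction:

```c
#include <arm_neon.h>

/* Widening multiply: two 64-bit uint8x8_t inputs, one 128-bit uint16x8_t
 * result.  Expected to compile to a single vmull.u8 (umull on AArch64). */
uint16x8_t widen_mul_u8(uint8x8_t a, uint8x8_t b)
{
    return vmull_u8(a, b);
}

/* Same idea for signed 16-bit lanes: the 32-bit products are kept in full,
 * so no intermediate truncation occurs (vmull.s16 / smull). */
int32x4_t widen_mul_s16(int16x4_t a, int16x4_t b)
{
    return vmull_s16(a, b);
}
```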
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
2012-08-23-legalize-vmull.ll
4 ; Test generataion of code for vmull instruction when multiplying 128-bit
7 ; The vmull operation requires 64-bit vectors, so we must extend the original
8 ; vector size to 64 bits for vmull operation.
10 ; for vmull.
20 ;CHECK: vmull
33 ;CHECK: vmull
46 ;CHECK: vmull
64 ;CHECK: vmull
80 ;CHECK: vmull
96 ;CHECK: vmull
[all …]
/external/boringssl/ios-arm/crypto/fipsmodule/
ghash-armv4.S
450 vmull.p8 q8, d16, d6 @ F = A1*B
452 vmull.p8 q0, d26, d0 @ E = A*B1
454 vmull.p8 q9, d18, d6 @ H = A2*B
456 vmull.p8 q11, d26, d22 @ G = A*B2
459 vmull.p8 q10, d20, d6 @ J = A3*B
462 vmull.p8 q0, d26, d0 @ I = A*B3
468 vmull.p8 q11, d26, d22 @ K = A*B4
479 vmull.p8 q0, d26, d6 @ D = A*B
488 vmull.p8 q8, d16, d6 @ F = A1*B
490 vmull.p8 q1, d28, d2 @ E = A*B1
[all …]
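In the ghash-armv4.S matches above, vmull.p8 performs a carry-less (polynomial) multiplication, and the comments show the full product being assembled from partial terms (F = A1*B, E = A*B1, and so on). A bare sketch of the underlying primitive with NEON intrinsics, omitting the masking and reduction steps of the actual routine; the function name is illustrative only:

```c
#include <arm_neon.h>

/* Carry-less multiply of two poly8x8_t vectors: eight independent 8x8-bit
 * GF(2) polynomial products, each 16 bits wide.  Compiles to vmull.p8. */
poly16x8_t clmul_8x8(poly8x8_t a, poly8x8_t b)
{
    return vmull_p8(a, b);
}
```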
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/
vmul.ll
152 ;CHECK: vmull.s8
163 ;CHECK: vmull.s8
172 ;CHECK: vmull.s16
183 ;CHECK: vmull.s16
192 ;CHECK: vmull.s32
203 ;CHECK: vmull.s32
212 ;CHECK: vmull.u8
223 ;CHECK: vmull.u8
232 ;CHECK: vmull.u16
243 ;CHECK: vmull.u16
[all …]
