Searched refs: d9 (Results 1 – 25 of 194), sorted by relevance


/external/llvm/test/MC/ARM/
neon-shuffle-encoding.s
81 vtrn.8 d3, d9
82 vtrn.i8 d3, d9
83 vtrn.u8 d3, d9
84 vtrn.p8 d3, d9
85 vtrn.16 d3, d9
86 vtrn.i16 d3, d9
87 vtrn.u16 d3, d9
88 vtrn.p16 d3, d9
89 vtrn.32 d3, d9
90 vtrn.i32 d3, d9
[all …]
vpush-vpop.s
6 vpush {d8, d9, d10, d11, d12}
8 vpop {d8, d9, d10, d11, d12}
11 vpush.s8 {d8, d9, d10, d11, d12}
13 vpop.f32 {d8, d9, d10, d11, d12}
16 @ CHECK-THUMB: vpush {d8, d9, d10, d11, d12} @ encoding: [0x2d,0xed,0x0a,0x8b]
18 @ CHECK-THUMB: vpop {d8, d9, d10, d11, d12} @ encoding: [0xbd,0xec,0x0a,0x8b]
21 @ CHECK-ARM: vpush {d8, d9, d10, d11, d12} @ encoding: [0x0a,0x8b,0x2d,0xed]
23 @ CHECK-ARM: vpop {d8, d9, d10, d11, d12} @ encoding: [0x0a,0x8b,0xbd,0xec]
26 @ CHECK-THUMB: vpush {d8, d9, d10, d11, d12} @ encoding: [0x2d,0xed,0x0a,0x8b]
28 @ CHECK-THUMB: vpop {d8, d9, d10, d11, d12} @ encoding: [0xbd,0xec,0x0a,0x8b]
[all …]
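
Note on the vpush-vpop.s matches above: d8–d15 are the callee-saved VFP/NEON registers under the AAPCS, which is why the saves and restores cover d8–d12 and why the test checks both Thumb and ARM encodings of the same register list. As a rough, hedged sketch of how such an llvm-mc encoding test is typically laid out (the RUN line and triple are assumptions, not copied from the original file; the encoding bytes are the ones quoted above):

    @ RUN line is an assumption about the usual llvm-mc/FileCheck setup, not the file's actual one:
    @ RUN: llvm-mc -triple thumbv7-unknown-unknown -mcpu=cortex-a8 -show-encoding < %s | FileCheck %s

    vpush {d8, d9, d10, d11, d12}   @ save callee-saved VFP regs d8-d12
    vpop {d8, d9, d10, d11, d12}    @ restore them before returning
    @ CHECK: vpush {d8, d9, d10, d11, d12} @ encoding: [0x2d,0xed,0x0a,0x8b]
    @ CHECK: vpop {d8, d9, d10, d11, d12} @ encoding: [0xbd,0xec,0x0a,0x8b]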
eh-directive-integrated-test.s
40 .vsave {d8, d9, d10, d11, d12}
41 vpush {d8, d9, d10, d11, d12}
45 vpop {d8, d9, d10, d11, d12}
74 .vsave {d8, d9, d10, d11, d12}
75 vpush {d8, d9, d10, d11, d12}
79 vpop {d8, d9, d10, d11, d12}
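Note on the eh-directive-integrated-test.s matches above: each .vsave directive records, in the ARM EHABI unwind table, the same d-register range that the adjacent vpush actually saves, so the unwinder can restore d8–d12 when unwinding through the frame. A minimal, self-contained sketch of that pairing (the symbol name and surrounding directives are illustrative assumptions, not taken from the test):

            .syntax unified
            .text
            .globl  demo_fn                 @ hypothetical name, for illustration only
            .type   demo_fn, %function
    demo_fn:
            .fnstart
            .save   {r4, lr}                @ unwind record for the core-register push
            push    {r4, lr}
            .vsave  {d8, d9, d10, d11, d12} @ unwind record; must match the vpush below
            vpush   {d8, d9, d10, d11, d12} @ d8-d15 are callee-saved under the AAPCS
            @ ... function body ...
            vpop    {d8, d9, d10, d11, d12}
            pop     {r4, pc}
            .fnend
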
single-precision-fp.s
9 vnmul.f64 d8, d9, d10
19 @ CHECK-ERRORS-NEXT: vnmul.f64 d8, d9, d10
21 vmla.f64 d11, d10, d9
27 vfnma.f64 d7, d8, d9
30 @ CHECK-ERRORS-NEXT: vmla.f64 d11, d10, d9
42 @ CHECK-ERRORS-NEXT: vfnma.f64 d7, d8, d9
78 vcvt.f64.s32 d9, s8
87 vcvt.u32.f64 d9, d10, #4
93 @ CHECK-ERRORS-NEXT: vcvt.f64.s32 d9, s8
111 @ CHECK-ERRORS-NEXT: vcvt.u32.f64 d9, d10, #4
neont2-dup-encoding.s
23 vdup.16 q9, d9[0]
29 vdup.16 q9, d9[1]
36 @ CHECK: vdup.16 q9, d9[0] @ encoding: [0xf2,0xff,0x49,0x2c]
42 @ CHECK: vdup.16 q9, d9[1] @ encoding: [0xf6,0xff,0x49,0x2c]
neont2-minmax-encoding.s
7 vmax.s32 d7, d8, d9
15 vmax.s32 d8, d9
39 @ CHECK: vmax.s32 d7, d8, d9 @ encoding: [0x28,0xef,0x09,0x76]
46 @ CHECK: vmax.s32 d8, d8, d9 @ encoding: [0x28,0xef,0x09,0x86]
69 vmin.s32 d7, d8, d9
77 vmin.s32 d8, d9
101 @ CHECK: vmin.s32 d7, d8, d9 @ encoding: [0x28,0xef,0x19,0x76]
108 @ CHECK: vmin.s32 d8, d8, d9 @ encoding: [0x28,0xef,0x19,0x86]
neon-minmax-encoding.s
5 vmax.s32 d7, d8, d9
13 vmax.s32 d8, d9
37 @ CHECK: vmax.s32 d7, d8, d9 @ encoding: [0x09,0x76,0x28,0xf2]
44 @ CHECK: vmax.s32 d8, d8, d9 @ encoding: [0x09,0x86,0x28,0xf2]
67 vmin.s32 d7, d8, d9
75 vmin.s32 d8, d9
99 @ CHECK: vmin.s32 d7, d8, d9 @ encoding: [0x19,0x76,0x28,0xf2]
106 @ CHECK: vmin.s32 d8, d8, d9 @ encoding: [0x19,0x86,0x28,0xf2]
/external/libhevc/common/arm/
ihevc_itrans_recon_32x32.s
209 vld1.16 d9,[r0],r6
216 vmlal.s16 q12,d9,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
217 vmlal.s16 q13,d9,d2[1] @// y1 * cos3 - y3 * sin1(part of b1)
218 vmlal.s16 q14,d9,d3[3] @// y1 * sin3 - y3 * cos1(part of b2)
219 vmlal.s16 q15,d9,d5[1] @// y1 * sin1 - y3 * sin3(part of b3)
278 vld1.16 d9,[r0],r6
286 vmlal.s16 q12,d9,d2[3] @// y1 * cos1 + y3 * cos3(part of b0)
287 vmlsl.s16 q13,d9,d7[3] @// y1 * cos3 - y3 * sin1(part of b1)
288 vmlsl.s16 q14,d9,d2[1] @// y1 * sin3 - y3 * cos1(part of b2)
289 vmlsl.s16 q15,d9,d3[1] @// y1 * sin1 - y3 * sin3(part of b3)
[all …]
ihevc_itrans_recon_16x16.s
232 vld1.16 d9,[r9],r8
275 vmlal.s16 q12,d9,d1[3]
276 vmlsl.s16 q13,d9,d2[3]
277 vmlsl.s16 q14,d9,d0[3]
278 vmlal.s16 q15,d9,d3[3]
312 vld1.16 d9,[r9],r5
335 vmlal.s16 q12,d9,d3[3]
336 vmlsl.s16 q13,d9,d3[1]
337 vmlal.s16 q14,d9,d2[3]
338 vmlsl.s16 q15,d9,d2[1]
[all …]
ihevc_intra_pred_luma_mode_3_to_9.s
201 vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
205 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 0)
207 vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
215 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
223 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 2)
236 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 4)
245 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 4)
258 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 6)
267 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 6)
334 vsub.s8 d9, d8, d2 @ref_main_idx - 1
[all …]
ihevc_intra_pred_filters_luma_mode_11_to_17.s
311 vadd.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
315 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 0)
317 vadd.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
325 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
333 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 2)
346 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 4)
355 vtbl.8 d13, {d0,d1}, d9 @load from ref_main_idx + 1 (row 4)
368 vadd.s8 d9, d9, d3 @ref_main_idx + 1 (row 6)
377 vtbl.8 d15, {d0,d1}, d9 @load from ref_main_idx + 1 (row 6)
439 vadd.s8 d9, d2, d8 @ref_main_idx + 1
[all …]
ihevc_intra_pred_chroma_mode_3_to_9.s
191 vmov.i8 d9, #22 @row 0 to 7
195 vadd.s8 d8, d8, d9 @to compensate the pu1_src idx incremented by 8
196 vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
200 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 0)
202 vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
212 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
220 vtbl.8 d15, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 2)
233 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 4)
242 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 4)
255 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 6)
[all …]
ihevc_intra_pred_luma_planar.s
185 vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
197 vmlal.u8 q6, d9, d20 @(1)(nt-1-col) * src[2nt-1-row]
211 vmlal.u8 q15, d9, d21 @(2)
228 vmlal.u8 q14, d9, d22 @(3)
245 vmlal.u8 q5, d9, d23 @(4)
262 vmlal.u8 q8, d9, d20 @(5)
278 vmlal.u8 q9, d9, d21 @(6)
295 vmlal.u8 q13, d9, d22 @(7)
311 vmlal.u8 q12, d9, d23 @(8)
337 vsub.s8 d9, d2, d8 @(1n)(1-8)[nt-1-col]
[all …]
ihevc_intra_pred_chroma_planar.s
169 vmov d9,d8
170 vzip.8 d8,d9
172 vsub.s8 d31, d2, d9
198 vmlal.u8 q14,d9,d1
218 vmlal.u8 q12,d9,d1
243 vmlal.u8 q10,d9,d1
265 vmlal.u8 q14,d9,d1
317 vmov d9,d8
318 vzip.8 d8,d9
320 vsub.s8 d31, d2, d9
[all …]
ihevc_intra_pred_filters_chroma_mode_11_to_17.s
309 vadd.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
313 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 0)
315 vadd.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
326 vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
334 vtbl.8 d15, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 2)
347 vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 4)
356 vtbl.8 d13, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 4)
369 vadd.s8 d9, d9, d29 @ref_main_idx + 1 (row 6)
380 vtbl.8 d15, {d0,d1,d2,d3}, d9 @load from ref_main_idx + 1 (row 6)
448 vadd.s8 d9, d29, d8 @ref_main_idx + 1
[all …]
ihevc_inter_pred_chroma_vert_w16out.s
142 vld1.8 {d9},[r6],r2 @loads pu1_src
145 vmull.u8 q3,d9,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
153 vmlsl.u8 q2,d9,d0
245 vld1.8 {d9},[r6],r2
255 vmlsl.u8 q13,d9,d3
263 vmlal.u8 q12,d9,d2
292 vld1.8 {d9},[r6],r2
308 vmlsl.u8 q13,d9,d3
322 vmlal.u8 q12,d9,d2
345 vld1.8 {d9},[r6],r2
[all …]
/external/llvm/test/CodeGen/Thumb2/
aligned-spill.ll
17 …tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15}"() nounwi…
29 ; NEON: vst1.64 {d8, d9, d10, d11}, [r4:128]!
39 ; NEON: vld1.64 {d8, d9, d10, d11}, [r[[R4]]:128]!
50 tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14}"() nounwind
60 ; NEON: vst1.64 {d8, d9, d10, d11}, [r4:128]!
64 ; NEON: vld1.64 {d8, d9, d10, d11},
74 tail call void asm sideeffect "", "~{d8},~{d9},~{d10},~{d12},~{d13},~{d14},~{d15}"() nounwind
87 ; NEON: vst1.64 {d8, d9}, [r4:128]
90 ; NEON: vld1.64 {d8, d9},
/external/llvm/test/CodeGen/ARM/
vfp-regs-dwarf.ll
5 ; asm("" ::: "d8", "d9", "d11", "d13");
17 ; CHECK: vpush {d8, d9}
21 ; CHECK: .cfi_offset {{265|d9}}, -24
24 ; CHECK: vpop {d8, d9}
27 call void asm sideeffect "", "~{d8},~{d9},~{d11},~{d13}"() #1
/external/llvm/test/MC/Disassembler/ARM/
neont2.txt
1659 # CHECK: vst1.8 {d8, d9}, [r4]!
1661 # CHECK: vst1.16 {d8, d9}, [r4]!
1663 # CHECK: vst1.32 {d8, d9}, [r4]!
1665 # CHECK: vst1.64 {d8, d9}, [r4]!
1667 # CHECK: vst1.8 {d8, d9}, [r4], r6
1669 # CHECK: vst1.16 {d8, d9}, [r4], r6
1671 # CHECK: vst1.32 {d8, d9}, [r4], r6
1673 # CHECK: vst1.64 {d8, d9}, [r4], r6
1676 # CHECK: vst1.8 {d8, d9, d10}, [r4]!
1678 # CHECK: vst1.16 {d8, d9, d10}, [r4]!
[all …]
neon.txt
1953 # CHECK: vst1.8 {d8, d9}, [r4]!
1955 # CHECK: vst1.16 {d8, d9}, [r4]!
1957 # CHECK: vst1.32 {d8, d9}, [r4]!
1959 # CHECK: vst1.64 {d8, d9}, [r4]!
1961 # CHECK: vst1.8 {d8, d9}, [r4], r6
1963 # CHECK: vst1.16 {d8, d9}, [r4], r6
1965 # CHECK: vst1.32 {d8, d9}, [r4], r6
1967 # CHECK: vst1.64 {d8, d9}, [r4], r6
1970 # CHECK: vst1.8 {d8, d9, d10}, [r4]!
1972 # CHECK: vst1.16 {d8, d9, d10}, [r4]!
[all …]
/external/libvpx/libvpx/vp8/common/arm/neon/
vp8_subpixelvariance16x16_neon.asm
58 vld1.u8 {d8, d9, d10}, [r0], r1
76 vmull.u8 q12, d9, d0
82 vext.8 d8, d8, d9, #1
92 vext.8 d9, d9, d10, #1
97 vmlal.u8 q12, d9, d1
115 vld1.u8 {d8, d9, d10}, [r0], r1
129 vmull.u8 q14, d9, d0
133 vext.8 d8, d8, d9, #1
141 vext.8 d9, d9, d10, #1
145 vmlal.u8 q14, d9, d1
[all …]
loopfiltersimpleverticaledge_neon.asm
30 vld4.8 {d6[0], d7[0], d8[0], d9[0]}, [r0], r12
31 vld4.8 {d6[1], d7[1], d8[1], d9[1]}, [r3], r12
32 vld4.8 {d6[2], d7[2], d8[2], d9[2]}, [r0], r12
33 vld4.8 {d6[3], d7[3], d8[3], d9[3]}, [r3], r12
34 vld4.8 {d6[4], d7[4], d8[4], d9[4]}, [r0], r12
35 vld4.8 {d6[5], d7[5], d8[5], d9[5]}, [r3], r12
36 vld4.8 {d6[6], d7[6], d8[6], d9[6]}, [r0], r12
37 vld4.8 {d6[7], d7[7], d8[7], d9[7]}, [r3], r12
49 vswp d12, d9
71 vsubl.s8 q13, d9, d11
/external/libavc/encoder/arm/
ime_distortion_metrics_a9q.s
101 vld1.8 {d8, d9}, [r0], r2
110 vabal.u8 q1, d11, d9
113 vld1.8 {d8, d9}, [r0], r2
121 vabal.u8 q1, d11, d9
184 vld1.8 {d8, d9}, [r0], r2
193 vabal.u8 q1, d11, d9
196 vld1.8 {d8, d9}, [r0], r2
204 vabal.u8 q1, d11, d9
268 vld1.8 {d8, d9}, [r0], r2
279 vabal.u8 q1, d11, d9
[all …]
ih264e_evaluate_intra16x16_modes_a9q.s
123 vaddl.u8 q15, d8, d9
242 vdup.8 q5, d9[7] @0
244 vdup.8 q6, d9[6] @1
246 vdup.8 q7, d9[5] @2
248 vdup.8 q8, d9[4] @3
250 vdup.8 q9, d9[3] @4
252 vdup.8 q10, d9[2] @5
254 vdup.8 q11, d9[1] @6
256 vdup.8 q12, d9[0] @7
/external/llvm/test/CodeGen/AArch64/
arm64-register-pairing.ll
10 ; CHECK: stp d9, d8, [sp, #48]
22 ; CHECK: ldp d9, d8, [sp, #48]
35 ; CHECK: stp d9, d8, [sp, #48]
47 ; CHECK: ldp d9, d8, [sp, #48]
51 …call void asm sideeffect "mov x0, #42", "~{x0},~{x20},~{x22},~{x24},~{x26},~{x28},~{d9},~{d11},~{d…
