/external/libxaac/decoder/armv7/ |
D | ixheaacd_sbr_imdct_using_fft.s |
    398 VTRN.32 q12, q5
    401 VTRN.32 q9, q2
    405 VTRN.32 q0, q7
    409 VTRN.32 q14, q4
    413 VTRN.32 q13, q6
    417 VTRN.32 q10, q3
    421 VTRN.32 q1, q8
    425 VTRN.32 q15, q11
    579 VTRN.32 q0, q4
    582 VTRN.32 q2, q6
    [all …]
|
D | ixheaacd_imdct_using_fft.s |
    394 VTRN.32 q12, q5
    396 VTRN.32 q9, q2
    400 VTRN.32 q0, q7
    404 VTRN.32 q14, q4
    408 VTRN.32 q13, q6
    412 VTRN.32 q10, q3
    416 VTRN.32 q1, q8
    420 VTRN.32 q15, q11
    570 VTRN.32 q0, q4
    573 VTRN.32 q2, q6
    [all …]
|
D | ixheaacd_fft32x32_ld.s |
    402 VTRN.32 q12, q5
    405 VTRN.32 q9, q2
    409 VTRN.32 q0, q7
    413 VTRN.32 q14, q4
    417 VTRN.32 q13, q6
    421 VTRN.32 q10, q3
    425 VTRN.32 q1, q8
    429 VTRN.32 q15, q11
    583 VTRN.32 q0, q4
    586 VTRN.32 q2, q6
    [all …]
|
D | ixheaacd_esbr_cos_sin_mod_loop2.s | 99 VTRN.32 D2, D3
|
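
The VTRN.32 q-register runs in the libxaac FFT/IMDCT kernels above are the standard NEON in-register transpose idiom: each VTRN.32 swaps the odd/even 32-bit lanes of a register pair, and two such pairs combine into a transposed 4x4 block of 32-bit values. A minimal C-intrinsics sketch of that pattern, assuming nothing about the kernels beyond the instruction sequence (the hand-written assembly exchanges the 64-bit halves with VSWP rather than vget/vcombine):

    #include <arm_neon.h>

    /* Transpose a 4x4 block of 32-bit values held in four q registers. */
    static inline void transpose4x4_s32(int32x4_t *r0, int32x4_t *r1,
                                        int32x4_t *r2, int32x4_t *r3)
    {
        /* Step 1: VTRN.32 transposes the 2x2 sub-blocks of each row pair. */
        int32x4x2_t t01 = vtrnq_s32(*r0, *r1);
        int32x4x2_t t23 = vtrnq_s32(*r2, *r3);

        /* Step 2: exchange the 64-bit halves across the pairs (VSWP's job
         * in the assembly) to finish the transpose. */
        *r0 = vcombine_s32(vget_low_s32(t01.val[0]),  vget_low_s32(t23.val[0]));
        *r1 = vcombine_s32(vget_low_s32(t01.val[1]),  vget_low_s32(t23.val[1]));
        *r2 = vcombine_s32(vget_high_s32(t01.val[0]), vget_high_s32(t23.val[0]));
        *r3 = vcombine_s32(vget_high_s32(t01.val[1]), vget_high_s32(t23.val[1]));
    }

The single VTRN.32 D2, D3 in ixheaacd_esbr_cos_sin_mod_loop2.s is the 64-bit form of the same operation: it simply swaps lane 1 of D2 with lane 0 of D3.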
/external/libhevc/common/arm/ |
D | ihevc_resi_trans_32x32_a9q.s |
    129 VTRN.S32 D30,D31 @ D30 - [0 0] [0 1] [16 0] [16 1]
    130 VTRN.S16 D30,D31 @ D31 - [8 0] [8 1] [24 0] [24 1]
    249 VTRN.S32 D26,D27
    267 VTRN.S16 D4,D5
    311 VTRN.32 Q15, Q5 @R1 transpose1 -- dual issue
    315 VTRN.32 Q4,Q12 @R2 transpose1
    316 VTRN.32 Q8,Q2 @R2 transpose1
    328 VTRN.32 Q9, Q13 @R1 transpose1
    380 VTRN.32 Q2, Q5 @
    383 VTRN.32 Q9, Q8 @ 1st cycle dual issued
    [all …]
|
D | ihevc_resi_trans.s |
    265 VTRN.16 d10,d11 @ Transpose step 1
    271 VTRN.16 d12,d13 @ Transpose step 2
    272 VTRN.32 q5,q6 @ Transpose step 3, Residue block transposed
    303 VTRN.32 q7,q8
    304 VTRN.32 q9,q10
    512 VTRN.16 q0,q1 @ Transpose residue matrix step (1a)
    514 VTRN.16 q2,q3 @ Transpose residue matrix step (1b)
    516 VTRN.16 q4,q5 @ Transpose residue matrix step (1c)
    517 VTRN.16 q6,q7 @ Transpose residue matrix step (1d)
    518 VTRN.32 q0,q2 @ Transpose residue matrix step (2a)
    [all …]
|
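
The ihevc_resi_trans.s matches above spell out the same transpose for 16-bit residues in three steps: VTRN.16 on each row pair, then VTRN.32 across the pairs, at which point the comment notes "Residue block transposed". A minimal sketch of those three steps with intrinsics, assuming 4x4 blocks of int16 rows (not the library's actual register allocation):

    #include <arm_neon.h>

    /* Transpose a 4x4 block of 16-bit residues held in four d registers. */
    static inline void transpose4x4_s16(int16x4_t *r0, int16x4_t *r1,
                                        int16x4_t *r2, int16x4_t *r3)
    {
        /* Steps 1-2: VTRN.16 transposes the 2x2 sub-blocks of each row pair. */
        int16x4x2_t t01 = vtrn_s16(*r0, *r1);  /* {a0 b0 a2 b2}, {a1 b1 a3 b3} */
        int16x4x2_t t23 = vtrn_s16(*r2, *r3);  /* {c0 d0 c2 d2}, {c1 d1 c3 d3} */

        /* Step 3: VTRN.32 swaps the 32-bit (pair-of-16-bit) lanes across pairs. */
        int32x2x2_t e = vtrn_s32(vreinterpret_s32_s16(t01.val[0]),
                                 vreinterpret_s32_s16(t23.val[0]));
        int32x2x2_t o = vtrn_s32(vreinterpret_s32_s16(t01.val[1]),
                                 vreinterpret_s32_s16(t23.val[1]));

        *r0 = vreinterpret_s16_s32(e.val[0]);  /* {a0 b0 c0 d0} */
        *r1 = vreinterpret_s16_s32(o.val[0]);  /* {a1 b1 c1 d1} */
        *r2 = vreinterpret_s16_s32(e.val[1]);  /* {a2 b2 c2 d2} */
        *r3 = vreinterpret_s16_s32(o.val[1]);  /* {a3 b3 c3 d3} */
    }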
/external/arm-neon-tests/ |
D | ref-rvct-neon-nofp16.txt |
    3047 VTRN/VTRNQ chunk 0 output:
    3048 VTRN/VTRNQ:0:result_int8x8 [] = { fffffff0, fffffff1, 11, 11, fffffff2, fffffff3, 11, 11, }
    3049 VTRN/VTRNQ:1:result_int16x4 [] = { fffffff0, fffffff1, 22, 22, }
    3050 VTRN/VTRNQ:2:result_int32x2 [] = { fffffff0, fffffff1, }
    3051 VTRN/VTRNQ:3:result_int64x1 [] = { 3333333333333333, }
    3052 VTRN/VTRNQ:4:result_uint8x8 [] = { f0, f1, 55, 55, f2, f3, 55, 55, }
    3053 VTRN/VTRNQ:5:result_uint16x4 [] = { fff0, fff1, 66, 66, }
    3054 VTRN/VTRNQ:6:result_uint32x2 [] = { fffffff0, fffffff1, }
    3055 VTRN/VTRNQ:7:result_uint64x1 [] = { 3333333333333333, }
    3056 VTRN/VTRNQ:8:result_poly8x8 [] = { f0, f1, 55, 55, f2, f3, 55, 55, }
    [all …]
|
D | ref-rvct-neon.txt |
    3475 VTRN/VTRNQ chunk 0 output:
    3476 VTRN/VTRNQ:0:result_int8x8 [] = { fffffff0, fffffff1, 11, 11, fffffff2, fffffff3, 11, 11, }
    3477 VTRN/VTRNQ:1:result_int16x4 [] = { fffffff0, fffffff1, 22, 22, }
    3478 VTRN/VTRNQ:2:result_int32x2 [] = { fffffff0, fffffff1, }
    3479 VTRN/VTRNQ:3:result_int64x1 [] = { 3333333333333333, }
    3480 VTRN/VTRNQ:4:result_uint8x8 [] = { f0, f1, 55, 55, f2, f3, 55, 55, }
    3481 VTRN/VTRNQ:5:result_uint16x4 [] = { fff0, fff1, 66, 66, }
    3482 VTRN/VTRNQ:6:result_uint32x2 [] = { fffffff0, fffffff1, }
    3483 VTRN/VTRNQ:7:result_uint64x1 [] = { 3333333333333333, }
    3484 VTRN/VTRNQ:8:result_poly8x8 [] = { f0, f1, 55, 55, f2, f3, 55, 55, }
    [all …]
|
D | ref-rvct-all.txt |
    3475 VTRN/VTRNQ chunk 0 output:
    3476 VTRN/VTRNQ:0:result_int8x8 [] = { fffffff0, fffffff1, 11, 11, fffffff2, fffffff3, 11, 11, }
    3477 VTRN/VTRNQ:1:result_int16x4 [] = { fffffff0, fffffff1, 22, 22, }
    3478 VTRN/VTRNQ:2:result_int32x2 [] = { fffffff0, fffffff1, }
    3479 VTRN/VTRNQ:3:result_int64x1 [] = { 3333333333333333, }
    3480 VTRN/VTRNQ:4:result_uint8x8 [] = { f0, f1, 55, 55, f2, f3, 55, 55, }
    3481 VTRN/VTRNQ:5:result_uint16x4 [] = { fff0, fff1, 66, 66, }
    3482 VTRN/VTRNQ:6:result_uint32x2 [] = { fffffff0, fffffff1, }
    3483 VTRN/VTRNQ:7:result_uint64x1 [] = { 3333333333333333, }
    3484 VTRN/VTRNQ:8:result_poly8x8 [] = { f0, f1, 55, 55, f2, f3, 55, 55, }
    [all …]
|
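
The ref-rvct dumps above are the expected outputs of the arm-neon-tests VTRN/VTRNQ driver, with 0xf0, 0xf1, ... in the first input vector and a constant fill (0x11, 0x22, ...) in the second. The intrinsic itself puts the even lanes of its inputs into val[0] and the odd lanes into val[1]; the chunk-0 ordering shown above is consistent with the harness then storing the result pair interleaved, vst2-style, which is an assumption about the harness rather than something visible in the snippet. A self-contained sketch under that assumption:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
        int8_t in[8] = { -16, -15, -14, -13, -12, -11, -10, -9 }; /* f0..f7 */
        int8x8_t a = vld1_s8(in);
        int8x8_t b = vdup_n_s8(0x11);

        /* VTRN: val[0] = {a0,b0,a2,b2,...}, val[1] = {a1,b1,a3,b3,...} */
        int8x8x2_t t = vtrn_s8(a, b);

        /* Assumed harness behaviour: store the result pair interleaved. */
        int8_t out[16];
        vst2_s8(out, t);
        for (int i = 0; i < 8; i++)
            printf("%02x ", (unsigned)(out[i] & 0xff)); /* f0 f1 11 11 f2 f3 11 11 */
        printf("\n");
        return 0;
    }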
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/ARM/ |
D | neon-shuffle-encoding.s | 79 @ VTRN alternate size suffices
|
/external/llvm/test/MC/ARM/ |
D | neon-shuffle-encoding.s | 79 @ VTRN alternate size suffices
|
/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/ |
D | vuzp.ll | 27 ; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
|
D | vzip.ll | 27 ; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
|
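
The comments in vuzp.ll and vzip.ll above point at a small but easily missed fact: on 64-bit (d-register) vectors with only two 32-bit lanes, zip, unzip, and transpose all degenerate to the same lane exchange, so the backend is free to emit VTRN.32 for any of them. In intrinsics terms (a sketch, not the tests themselves):

    #include <arm_neon.h>

    /* For 2-lane vectors, VZIP.32, VUZP.32 and VTRN.32 coincide:
     * each returns { a0, b0 } in val[0] and { a1, b1 } in val[1]. */
    void two_lane_shuffles(int32x2_t a, int32x2_t b,
                           int32x2x2_t *zip, int32x2x2_t *uzp, int32x2x2_t *trn)
    {
        *zip = vzip_s32(a, b);  /* VZIP.32 d, d */
        *uzp = vuzp_s32(a, b);  /* VUZP.32 d, d */
        *trn = vtrn_s32(a, b);  /* VTRN.32 d, d */
    }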
D | vtrn.ll | 99 ; Undef shuffle indices should not prevent matching to VTRN:
|
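
The "undef shuffle indices" comment in vtrn.ll (and in the arm64-trn.ll tests below) concerns shuffle masks that leave some output lanes unspecified: instruction selection should still recognize the VTRN (or AArch64 TRN1/TRN2) pattern from the lanes that are defined. The tests themselves are LLVM IR with literal undef mask elements; a rough C-level analogue uses clang's __builtin_shufflevector, where an index of -1 marks a don't-care lane (this sketch assumes clang):

    #include <arm_neon.h>

    /* A fully specified VTRN.32 "even" half would use mask <0, 4, 2, 6>.
     * Lane 2 is left as don't-care (-1); the remaining lanes still form
     * the transpose pattern, so VTRN.32 / TRN1 can still be selected. */
    int32x4_t trn_even_with_dont_care(int32x4_t a, int32x4_t b)
    {
        return __builtin_shufflevector(a, b, 0, 4, -1, 6);
    }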
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMISelLowering.h | 160 VTRN, // transpose enumerator
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-trn.ll | 108 ; Undef shuffle indices should not prevent matching to VTRN:
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | arm64-trn.ll | 108 ; Undef shuffle indices should not prevent matching to VTRN:
|
/external/llvm/lib/Target/ARM/ |
D | ARMISelLowering.h | 158 VTRN, // transpose enumerator
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/ARM/ |
D | ARMISelLowering.h | 190 VTRN, // transpose enumerator
|
D | ARMScheduleSwift.td | 601 (instregex "VSWP", "VTRN", "VUZP", "VZIP")>;
|
/external/llvm/test/CodeGen/ARM/ |
D | vzip.ll | 67 ; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
|
D | vuzp.ll | 67 ; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | vzip.ll | 68 ; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
|
D | vuzp.ll | 68 ; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
|
/external/v8/src/arm/ |
D | assembler-arm.cc |
    4787 enum NeonSizedOp { VZIP, VUZP, VREV16, VREV32, VREV64, VTRN }; enumerator
    4808 case VTRN: in EncodeNeonSizedOp()
    4886 emit(EncodeNeonSizedOp(VTRN, NEON_D, size, src1.code(), src2.code())); in vtrn()
    4893 emit(EncodeNeonSizedOp(VTRN, NEON_Q, size, src1.code(), src2.code())); in vtrn()
|