/external/compiler-rt/lib/builtins/arm/ |
D | addsf3vfp.S |
    21 vmov s14, r0    // move first param from r0 into float register
    23 vadd.f32 s14, s14, s15
    24 vmov r0, s14    // move result back to r0
|
D | subsf3vfp.S |
    22 vmov s14, r0    // move first param from r0 into float register
    24 vsub.f32 s14, s14, s15
    25 vmov r0, s14    // move result back to r0
|
D | mulsf3vfp.S |
    21 vmov s14, r0    // move first param from r0 into float register
    23 vmul.f32 s13, s14, s15
|
D | divsf3vfp.S |
    21 vmov s14, r0    // move first param from r0 into float register
    23 vdiv.f32 s13, s14, s15
|
D | eqsf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
D | gesf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
D | ltsf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
D | unordsf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
D | gtsf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
D | lesf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
D | nesf2vfp.S |
    22 vmov s14, r0    // move from GPR 0 to float register
    24 vcmp.f32 s14, s15
|
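The compiler-rt routines above are the soft-float ABI entry points for
single-precision arithmetic on parts that do have VFP hardware: operands
arrive in GPRs (r0/r1), are moved into s14/s15, processed by a single VFP
instruction, and the result is moved back to r0. A C-level model of what
these assembly files compute (the function names are the real compiler-rt
symbols; the C bodies are semantic sketches, not the actual implementations):

    /* Arithmetic shims: one VFP instruction between two vmov pairs. */
    float __addsf3vfp(float a, float b) { return a + b; }  /* vadd.f32 */
    float __subsf3vfp(float a, float b) { return a - b; }  /* vsub.f32 */
    float __mulsf3vfp(float a, float b) { return a * b; }  /* vmul.f32 */
    float __divsf3vfp(float a, float b) { return a / b; }  /* vdiv.f32 */

    /* Comparison shims: vcmp.f32 sets the FPSCR flags, which the
     * assembly then converts to a 0/1 result in r0. */
    int __eqsf2vfp(float a, float b)    { return a == b; }
    int __unordsf2vfp(float a, float b) { return a != a || b != b; }  /* NaN check */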
/external/llvm/test/MC/AArch64/ |
D | neon-scalar-shift-imm.s |
    73 sqshl s14, s17, #22
    86 uqshl s14, s19, #18
    99 sqshlu s16, s14, #25
    136 uqshrn h10, s14, #5
    169 sqshrun h20, s14, #3
|
D | neon-scalar-recip.s |
    34 frecpe s19, s14
|
D | neon-scalar-mul.s |
    51 sqdmlsl s14, h12, h25
|
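These AArch64 MC tests only verify that the assembler accepts the scalar
NEON mnemonics; the interesting semantics are the saturating shifts.
SQSHL shifts a lane left and clamps on signed overflow instead of
wrapping. A scalar C model of a 32-bit lane (my own sketch of the
architectural behaviour, not code from the tests):

    #include <stdint.h>

    /* Model of "sqshl s14, s17, #22" on one int32 lane: shift left,
     * saturate to INT32_MIN/INT32_MAX if significant bits would be
     * lost.  The hardware additionally sets the cumulative QC flag. */
    static int32_t sqshl32(int32_t x, unsigned shift) {
      /* multiply instead of shifting to avoid UB on negative x;
       * exact in 64 bits for shift <= 31 */
      int64_t wide = (int64_t)x * ((int64_t)1 << shift);
      if (wide > INT32_MAX) return INT32_MAX;
      if (wide < INT32_MIN) return INT32_MIN;
      return (int32_t)wide;
    }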
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans16_dspr2.c |
    1061 int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15; in iadst16_dspr2() local
    1103 s14 = x14 * cospi_29_64 + x15 * cospi_3_64; in iadst16_dspr2()
    1112 x6 = dct_const_round_shift(s6 + s14); in iadst16_dspr2()
    1120 x14 = dct_const_round_shift(s6 - s14); in iadst16_dspr2()
    1138 s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; in iadst16_dspr2()
    1151 x10 = dct_const_round_shift(s10 + s14); in iadst16_dspr2()
    1155 x14 = dct_const_round_shift(s10 - s14); in iadst16_dspr2()
    1173 s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; in iadst16_dspr2()
    1188 x12 = dct_const_round_shift(s12 + s14); in iadst16_dspr2()
    1190 x14 = dct_const_round_shift(s12 - s14); in iadst16_dspr2()
    [all …]
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_dct.c |
    348 tran_high_t s9, s10, s11, s12, s13, s14, s15; in fadst16() local
    382 s14 = x14 * cospi_29_64 + x15 * cospi_3_64; in fadst16()
    391 x6 = fdct_round_shift(s6 + s14); in fadst16()
    399 x14 = fdct_round_shift(s6 - s14); in fadst16()
    417 s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; in fadst16()
    430 x10 = fdct_round_shift(s10 + s14); in fadst16()
    434 x14 = fdct_round_shift(s10 - s14); in fadst16()
    452 s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; in fadst16()
    467 x12 = fdct_round_shift(s12 + s14); in fadst16()
    469 x14 = fdct_round_shift(s12 - s14); in fadst16()
    [all …]
|
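All three of the libvpx/vp9 s14 sites (iadst16_dspr2 above, fadst16 here,
and iadst16_c in vpx_dsp/inv_txfm.c further down) are the same 16-point
ADST butterfly network: each stage forms Q14 fixed-point dot-products
against a cos(k*pi/64) table, then rounds away the 14 fractional bits.
A sketch of the rounding helper and one butterfly pair (DCT_CONST_BITS
and the helper's shape are recalled from vpx_dsp/txfm_common.h, so
treat the details as assumptions):

    #include <stdint.h>

    typedef int64_t tran_high_t;   /* wide intermediate type, as in vpx_dsp */

    #define DCT_CONST_BITS 14      /* cospi_k_64 ~ round(cos(k*pi/64) * 2^14) */

    static tran_high_t dct_const_round_shift(tran_high_t input) {
      /* round half up, then drop the Q14 scale */
      return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    }

    /* Shape of the butterfly at every s14 hit: two inputs scaled by a
     * constant pair, then a rounded sum/difference with a partner term. */
    static void adst_butterfly(tran_high_t x14, tran_high_t x15,
                               tran_high_t c0, tran_high_t c1, /* e.g. cospi_29_64, cospi_3_64 */
                               tran_high_t s6,
                               tran_high_t *x6_out, tran_high_t *x14_out) {
      tran_high_t s14 = x14 * c0 + x15 * c1;
      *x6_out  = dct_const_round_shift(s6 + s14);
      *x14_out = dct_const_round_shift(s6 - s14);
    }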
/external/llvm/test/MC/ARM/ |
D | fp-armv8.s |
    76 vselvs.f32 s21, s16, s14
    77 @ CHECK: vselvs.f32 s21, s16, s14 @ encoding: [0x07,0xaa,0x58,0xfe]
    104 vrintxvs.f32 s10, s14
    105 @ CHECK: vrintxvs.f32 s10, s14 @ encoding: [0x47,0x5a,0xb7,0x6e]
|
D | thumb-fp-armv8.s |
    79 vselvs.f32 s21, s16, s14
    80 @ CHECK: vselvs.f32 s21, s16, s14 @ encoding: [0x58,0xfe,0x07,0xaa]
    110 vrintxvs.f32 s10, s14
    111 @ CHECK: vrintxvs.f32 s10, s14 @ encoding: [0xb7,0xee,0x47,0x5a]
|
D | fullfp16.s |
    157 vselvs.f16 s21, s16, s14
    158 @ ARM: vselvs.f16 s21, s16, s14 @ encoding: [0x07,0xa9,0x58,0xfe]
    159 @ THUMB: vselvs.f16 s21, s16, s14 @ encoding: [0x58,0xfe,0x07,0xa9]
    177 vrintx.f16 s10, s14
    178 @ ARM: vrintx.f16 s10, s14 @ encoding: [0x47,0x59,0xb7,0xee]
    179 @ THUMB: vrintx.f16 s10, s14 @ encoding: [0xb7,0xee,0x47,0x59]
|
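The fp-armv8 / fullfp16 MC tests pin down the ARM vs Thumb encodings of
the ARMv8 conditional-select (VSEL) and round-to-integral (VRINTX)
instructions. Semantically, VRINTX rounds to an integral value in the
current rounding mode and signals Inexact when the value changes, which
matches C99 rint(); the mapping is my inference, not something the
tests state:

    #include <math.h>

    /* vrintx.f32 s10, s14  ~  s10 = rintf(s14): round using the current
     * rounding mode, raising the inexact exception when value changes. */
    float vrintx_f32_model(float s14) { return rintf(s14); }

    /* vselvs.f32 s21, s16, s14 selects s16 when the V flag is set
     * (after an FP compare, V means "unordered"), else s14; the flag
     * is not observable from portable C, so no model is given. */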
/external/boringssl/src/crypto/curve25519/ |
D | curve25519.c |
    3807 int64_t s14 = 2097151 & (load_4(s + 36) >> 6); in x25519_sc_reduce() local
    3838 s14 -= s23 * 997805; in x25519_sc_reduce()
    3847 s14 += s22 * 136657; in x25519_sc_reduce()
    3856 s14 -= s21 * 683901; in x25519_sc_reduce()
    3895 carry14 = (s14 + (1 << 20)) >> 21; in x25519_sc_reduce()
    3897 s14 -= carry14 << 21; in x25519_sc_reduce()
    3912 s14 += carry13; in x25519_sc_reduce()
    3942 s2 += s14 * 666643; in x25519_sc_reduce()
    3943 s3 += s14 * 470296; in x25519_sc_reduce()
    3944 s4 += s14 * 654183; in x25519_sc_reduce()
    [all …]
|
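x25519_sc_reduce reduces a 512-bit scalar modulo the curve25519 group
order l using signed 21-bit limbs; s14 is limb 14, whose low bit is bit
294 = 36*8 + 6 of the input, hence the byte offset and shift in the
first hit. The multiply-accumulate lines fold high limbs back down via
the limbs of 2^252 mod l. A sketch of the unpacking and one carry step
(load_4 is the file's real little-endian helper; its body here is
reconstructed from the ref10 design, so treat it as an assumption):

    #include <stdint.h>

    /* Little-endian load of 4 bytes. */
    static uint64_t load_4(const uint8_t *in) {
      uint64_t r = (uint64_t)in[0];
      r |= (uint64_t)in[1] << 8;
      r |= (uint64_t)in[2] << 16;
      r |= (uint64_t)in[3] << 24;
      return r;
    }

    void limb14_demo(const uint8_t s[64]) {
      /* limb k holds bits [21k, 21k+21); 2097151 == 2^21 - 1 */
      int64_t s14 = 2097151 & (load_4(s + 36) >> 6);
      int64_t s15 = 0;

      /* carry step from the listing: round s14 toward a multiple of
       * 2^21 and push the signed carry into the next limb */
      int64_t carry14 = (s14 + (1 << 20)) >> 21;
      s15 += carry14;
      s14 -= carry14 << 21;
      (void)s14; (void)s15;
    }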
/external/clang/test/Sema/ |
D | bitfield-layout_1.c |
    143 } s14; variable
    144 CHECK_SIZE(s14,6)
|
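The Sema test asserts the total size of a bit-field struct variable
named s14. CHECK_SIZE itself is not among the hits; the conventional
way to write such a compile-time assertion in a C89-compatible test is
a negative array size (this macro body is a guess at the helper, not
copied from the file):

    /* Declaring an array of size -1 is a hard error, so a wrong
     * layout fails the build rather than the test run. */
    #define CHECK_SIZE(var, size) \
      extern int var##_check[sizeof(var) == (size) ? 1 : -1];

    struct { char a, b; } pair;
    CHECK_SIZE(pair, 2)   /* compiles; a wrong size would not */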
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | highbd_vpx_convolve8_neon.c |
    272 int16x8_t s11, s12, s13, s14; in vpx_highbd_convolve8_horiz_neon() local
    302 &s12, &s13, &s14); in vpx_highbd_convolve8_horiz_neon()
    303 transpose_s16_8x8(&s7, &s8, &s9, &s10, &s11, &s12, &s13, &s14); in vpx_highbd_convolve8_horiz_neon()
    312 d7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters, max); in vpx_highbd_convolve8_horiz_neon()
    323 s6 = s14; in vpx_highbd_convolve8_horiz_neon()
    492 int16x8_t s11, s12, s13, s14; in vpx_highbd_convolve8_avg_horiz_neon() local
    522 &s12, &s13, &s14); in vpx_highbd_convolve8_avg_horiz_neon()
    523 transpose_s16_8x8(&s7, &s8, &s9, &s10, &s11, &s12, &s13, &s14); in vpx_highbd_convolve8_avg_horiz_neon()
    532 d7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters, max); in vpx_highbd_convolve8_avg_horiz_neon()
    553 s6 = s14; in vpx_highbd_convolve8_avg_horiz_neon()
|
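In the high-bitdepth convolvers above, s0..s14 form a sliding window of
16-bit source vectors for an 8-tap filter: each iteration loads the new
vectors, transposes, computes eight outputs, then shifts the window
(s6 = s14 and so on) so each sample is loaded only once. A scalar model
of the per-pixel work that convolve8_8 performs on whole vectors
(FILTER_BITS == 7 is recalled from vpx_dsp/vpx_filter.h; the scalar
body is my sketch):

    #include <stdint.h>

    /* One 8-tap, high-bitdepth convolution output: dot product, round
     * away FILTER_BITS, clamp to [0, max] for the given bit depth. */
    static uint16_t convolve8_1(const int16_t src[8],
                                const int16_t filter[8], int max) {
      int32_t sum = 0;
      int k;
      for (k = 0; k < 8; ++k) sum += src[k] * filter[k];
      sum = (sum + 64) >> 7;            /* ROUND_POWER_OF_TWO(sum, 7) */
      if (sum < 0) sum = 0;
      if (sum > max) sum = max;
      return (uint16_t)sum;
    }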
D | loopfilter_neon.c |
    549 uint8x##w##_t *s14, uint8x##w##_t *s15) { \
    578 *s14 = vld1##r##u8(s); \
    755 const uint8x16_t s13, const uint8x16_t s14, in store_16x16() argument
    785 vst1q_u8(s, s14); in store_16x16()
    845 uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, in vpx_lpf_vertical_4_dual_neon() local
    851 &s11, &s12, &s13, &s14, &s15); in vpx_lpf_vertical_4_dual_neon()
    853 s14, s15, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); in vpx_lpf_vertical_4_dual_neon()
    923 uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, in vpx_lpf_vertical_8_dual_neon() local
    930 &s11, &s12, &s13, &s14, &s15); in vpx_lpf_vertical_8_dual_neon()
    932 s14, s15, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); in vpx_lpf_vertical_8_dual_neon()
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/ |
D | inv_txfm.c |
    388 tran_high_t s9, s10, s11, s12, s13, s14, s15; in iadst16_c() local
    427 s14 = x14 * cospi_29_64 + x15 * cospi_3_64; in iadst16_c()
    436 x6 = WRAPLOW(dct_const_round_shift(s6 + s14)); in iadst16_c()
    444 x14 = WRAPLOW(dct_const_round_shift(s6 - s14)); in iadst16_c()
    462 s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; in iadst16_c()
    475 x10 = WRAPLOW(dct_const_round_shift(s10 + s14)); in iadst16_c()
    479 x14 = WRAPLOW(dct_const_round_shift(s10 - s14)); in iadst16_c()
    497 s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; in iadst16_c()
    512 x12 = WRAPLOW(dct_const_round_shift(s12 + s14)); in iadst16_c()
    514 x14 = WRAPLOW(dct_const_round_shift(s12 - s14)); in iadst16_c()
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | remat-float0.ll | 15 …2},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9},~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s1…
|