/external/libvpx/libvpx/vp9/encoder/mips/msa/
D | vp9_fdct4x4_msa.c |
  18  v8i16 in0, in1, in2, in3, in4;  in vp9_fwht4x4_msa() local
  20  LD_SH4(input, src_stride, in0, in1, in2, in3);  in vp9_fwht4x4_msa()
  23  in3 -= in2;  in vp9_fwht4x4_msa()
  25  SUB2(in4, in1, in4, in2, in1, in2);  in vp9_fwht4x4_msa()
  26  in0 -= in2;  in vp9_fwht4x4_msa()
  29  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in vp9_fwht4x4_msa()
  31  in0 += in2;  in vp9_fwht4x4_msa()
  34  SUB2(in4, in2, in4, in3, in2, in3);  in vp9_fwht4x4_msa()
  36  in1 += in2;  in vp9_fwht4x4_msa()
  38  SLLI_4V(in0, in1, in2, in3, 2);  in vp9_fwht4x4_msa()
  [all …]
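The matches above are one lifting pass of VP9's 4x4 forward Walsh-Hadamard transform, vectorized with MSA; lines that don't mention in2 are elided by the search, and the second pass uses different register names because the transpose emits them in (in0, in2, in3, in1) order. A scalar sketch of the pass, transcribed from the visible vector ops (fwht4_1d is a hypothetical helper name):

    #include <stdint.h>

    /* One 1-D pass of the lifting WHT, transcribed from the vector ops
     * above. Steps marked (inferred) don't mention in2 and so are elided
     * from the listing; they are the standard WHT lifting steps. */
    static void fwht4_1d(int16_t v[4]) {
      int16_t e;
      v[0] = v[0] + v[1];      /* in0 += in1 (inferred) */
      v[3] = v[3] - v[2];      /* in3 -= in2 */
      e = (v[0] - v[3]) >> 1;  /* in4 = (in0 - in3) >> 1 (inferred) */
      v[1] = e - v[1];         /* SUB2: in1 = in4 - in1 */
      v[2] = e - v[2];         /*       in2 = in4 - in2 */
      v[0] = v[0] - v[2];      /* in0 -= in2 */
      v[3] = v[3] + v[1];      /* in3 += in1 (inferred) */
    }

The full transform runs this pass over columns, transposes, repeats it, then scales everything by 4 (the SLLI_4V shift by 2).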
D | vp9_fdct8x8_msa.c |
  18  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_fht8x8_msa() local
  20  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
  21  SLLI_4V(in0, in1, in2, in3, 2);  in vp9_fht8x8_msa()
  26  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_fht8x8_msa()
  27  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
  28  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_fht8x8_msa()
  29  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
  30  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_fht8x8_msa()
  31  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
  34  VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_fht8x8_msa()
  [all …]
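Both the DCT and ADST paths of vp9_fht8x8_msa follow the same separable pattern: a 1-D pass, an in-register TRANSPOSE8x8_SH_SH, then the same pass again over the other dimension. In scalar terms the transpose step is just an element swap (sketch):

    #include <stdint.h>

    /* Scalar equivalent of TRANSPOSE8x8_SH_SH: swap rows and columns in
     * place so the second 1-D pass can reuse the row kernel (sketch). */
    static void transpose_8x8(int16_t m[8][8]) {
      for (int r = 0; r < 8; ++r) {
        for (int c = r + 1; c < 8; ++c) {
          int16_t t = m[r][c];
          m[r][c] = m[c][r];
          m[c][r] = t;
        }
      }
    }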
D | vp9_fdct_msa.h |
  18  #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \  argument
  47  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
  51  cnst1_m, cnst2_m, cnst3_m, in5, in2, \
  53  BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
  72  ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
  84  #define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) { \  argument
  90  UNPCK_R_SH_SW(in2, in2_r_m); \
/external/libvpx/libvpx/vp9/common/mips/msa/
D | vp9_idct4x4_msa.c |
  18  v8i16 in0, in1, in2, in3;  in vp9_iht4x4_16_add_msa() local
  21  LD4x4_SH(input, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  22  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  27  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  29  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  30  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  34  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  36  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  37  VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  41  VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vp9_iht4x4_16_add_msa()
  [all …]
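The branches above select per-dimension kernels by transform type: each of the two 1-D passes is either an IDCT or an IADST, with a transpose in between. A hypothetical scalar dispatch with the same shape (idct4, iadst4, and transpose4 are invented stand-ins for the macros above, and the branch-to-type mapping is a sketch, not the library's exact ordering):

    #include <stdint.h>

    /* Hypothetical 1-D kernels standing in for the VP9_IDCT4x4 /
     * VP9_IADST4x4 / TRANSPOSE4x4_SH_SH macros (declarations only). */
    void idct4(int16_t blk[4][4]);
    void iadst4(int16_t blk[4][4]);
    void transpose4(int16_t blk[4][4]);

    typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE;

    /* One kernel per pass, transpose in between: the structure of the
     * four branches visible above (sketch). */
    static void iht4x4_dispatch(int16_t blk[4][4], TX_TYPE tx_type) {
      switch (tx_type) {
        case DCT_DCT:   idct4(blk);  transpose4(blk); idct4(blk);  break;
        case ADST_DCT:  idct4(blk);  transpose4(blk); iadst4(blk); break;
        case DCT_ADST:  iadst4(blk); transpose4(blk); idct4(blk);  break;
        case ADST_ADST: iadst4(blk); transpose4(blk); iadst4(blk); break;
      }
    }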
D | vp9_idct8x8_msa.c |
  18  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_iht8x8_64_add_msa() local
  21  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
  23  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_iht8x8_64_add_msa()
  24  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
  29  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_iht8x8_64_add_msa()
  30  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
  32  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_iht8x8_64_add_msa()
  33  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
  34  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,  in vp9_iht8x8_64_add_msa()
  35  in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
  [all …]
/external/libchrome/crypto/
D | curve25519-donna.c |
  73  static void fproduct(limb *output, const limb *in2, const limb *in) {  in fproduct() argument
  74  output[0] = ((limb) ((s32) in2[0])) * ((s32) in[0]);  in fproduct()
  75  output[1] = ((limb) ((s32) in2[0])) * ((s32) in[1]) +  in fproduct()
  76  ((limb) ((s32) in2[1])) * ((s32) in[0]);  in fproduct()
  77  output[2] = 2 * ((limb) ((s32) in2[1])) * ((s32) in[1]) +  in fproduct()
  78  ((limb) ((s32) in2[0])) * ((s32) in[2]) +  in fproduct()
  79  ((limb) ((s32) in2[2])) * ((s32) in[0]);  in fproduct()
  80  output[3] = ((limb) ((s32) in2[1])) * ((s32) in[2]) +  in fproduct()
  81  ((limb) ((s32) in2[2])) * ((s32) in[1]) +  in fproduct()
  82  ((limb) ((s32) in2[0])) * ((s32) in[3]) +  in fproduct()
  [all …]
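fproduct is a 10-limb schoolbook multiplication unrolled by hand; the factor of 2 on some cross terms (visible in output[2]) comes from donna's mixed 26/25-bit limb radix. The underlying pattern as a plain loop (sketch, without the radix-specific doubling):

    #include <stdint.h>

    /* Schoolbook product: out[k] collects every a[i]*b[j] with i + j == k.
     * out must hold 2*n - 1 limbs; widening to 64 bits avoids overflow. */
    static void schoolbook_mul(int64_t *out, const int32_t *a,
                               const int32_t *b, int n) {
      for (int k = 0; k < 2 * n - 1; ++k) out[k] = 0;
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
          out[i + j] += (int64_t)a[i] * b[j];
    }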
/external/libvpx/libvpx/vpx_dsp/mips/
D | idct4x4_msa.c |
  15  v8i16 in0, in1, in2, in3;  in vpx_iwht4x4_16_add_msa() local
  19  LD4x4_SH(input, in0, in2, in3, in1);  in vpx_iwht4x4_16_add_msa()
  20  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);  in vpx_iwht4x4_16_add_msa()
  22  UNPCK_R_SH_SW(in2, in2_r);  in vpx_iwht4x4_16_add_msa()
  46  in0, in1, in2, in3);  in vpx_iwht4x4_16_add_msa()
  47  ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);  in vpx_iwht4x4_16_add_msa()
  72  v8i16 in0, in1, in2, in3;  in vpx_idct4x4_16_add_msa() local
  75  LD4x4_SH(input, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
  77  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
  78  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);  in vpx_idct4x4_16_add_msa()
  [all …]
D | fwd_txfm_msa.c |
  16  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x16_1d_column() local
  29  in0, in1, in2, in3, in4, in5, in6, in7,  in fdct8x16_1d_column()
  31  SLLI_4V(in0, in1, in2, in3, 2);  in fdct8x16_1d_column()
  35  ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);  in fdct8x16_1d_column()
  40  SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);  in fdct8x16_1d_column()
  135  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct16x8_1d_row() local
  138  LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct16x8_1d_row()
  140  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,  in fdct16x8_1d_row()
  141  in0, in1, in2, in3, in4, in5, in6, in7);  in fdct16x8_1d_row()
  144  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);  in fdct16x8_1d_row()
  [all …]
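The ADD4/SUB4 pair is the first butterfly stage of the 16-point column transform: mirrored sums feed the even-frequency half and mirrored differences feed the odd half. Scalar sketch (dct16_stage1 is a hypothetical name):

    #include <stdint.h>

    /* First butterfly stage of a 16-point DCT: in[i] +/- in[15-i],
     * the scalar form of the ADD4/SUB4 pairs above (sketch). */
    static void dct16_stage1(const int16_t in[16],
                             int16_t even[8], int16_t odd[8]) {
      for (int i = 0; i < 8; ++i) {
        even[i] = in[i] + in[15 - i];
        odd[i]  = in[i] - in[15 - i];
      }
    }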
D | idct8x8_msa.c |
  15  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_idct8x8_64_add_msa() local
  18  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
  21  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,  in vpx_idct8x8_64_add_msa()
  22  in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
  24  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,  in vpx_idct8x8_64_add_msa()
  25  in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
  27  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,  in vpx_idct8x8_64_add_msa()
  28  in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
  30  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,  in vpx_idct8x8_64_add_msa()
  31  in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
  [all …]
D | macros_msa.h |
  268  #define SW4(in0, in1, in2, in3, pdst, stride) { \  argument
  271  SW(in2, (pdst) + 2 * stride); \
  282  #define SD4(in0, in1, in2, in3, pdst, stride) { \  argument
  285  SD(in2, (pdst) + 2 * stride); \
  406  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) { \  argument
  408  ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
  412  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
  414  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
  430  #define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) { \  argument
  432  ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
  [all …]
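These store helpers compose pairwise: ST_B4 issues two strided ST_B2 pairs and ST_B8 issues two ST_B4s. A hypothetical plain-C rendering of SW4 shows the addressing pattern (the real macros emit MSA stores and differ in alignment handling):

    #include <stdint.h>
    #include <string.h>

    /* Store four 32-bit words at pdst, pdst+stride, ... (stride in
     * bytes); memcpy keeps the stores alignment-safe (sketch). */
    static void sw4(uint32_t in0, uint32_t in1, uint32_t in2, uint32_t in3,
                    uint8_t *pdst, int stride) {
      memcpy(pdst + 0 * stride, &in0, 4);
      memcpy(pdst + 1 * stride, &in1, 4);
      memcpy(pdst + 2 * stride, &in2, 4);
      memcpy(pdst + 3 * stride, &in3, 4);
    }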
D | fwd_dct32x32_msa.c |
  16  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x32_1d_column_load_butterfly() local
  22  LD_SH4(input, src_stride, in0, in1, in2, in3);  in fdct8x32_1d_column_load_butterfly()
  26  SLLI_4V(in0, in1, in2, in3, 2);  in fdct8x32_1d_column_load_butterfly()
  30  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,  in fdct8x32_1d_column_load_butterfly()
  40  LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3);  in fdct8x32_1d_column_load_butterfly()
  44  SLLI_4V(in0, in1, in2, in3, 2);  in fdct8x32_1d_column_load_butterfly()
  48  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7,  in fdct8x32_1d_column_load_butterfly()
  59  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x32_1d_column_even_store() local
  65  LD_SH4(input, 8, in0, in1, in2, in3);  in fdct8x32_1d_column_even_store()
  67  BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15,  in fdct8x32_1d_column_even_store()
  [all …]
/external/libvpx/libvpx/vp8/encoder/mips/msa/
D | dct_msa.c |
  14  #define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \  argument
  18  ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
  20  ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \
  71  v8i16 in0, in1, in2, in3;  in vp8_short_fdct4x4_msa() local
  78  LD_SH4(input, pitch / 2, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
  79  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
  81  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);  in vp8_short_fdct4x4_msa()
  84  in2 = temp0 - temp1;  in vp8_short_fdct4x4_msa()
  95  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
  97  BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3);  in vp8_short_fdct4x4_msa()
  [all …]
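BUTTERFLY_4 produces crosswise sums and differences of its four inputs; assuming the usual libvpx definition, its scalar meaning is:

    #include <stdint.h>

    /* Assumed scalar semantics of BUTTERFLY_4: outer and inner pairs
     * added, then subtracted in reverse order (sketch). */
    static void butterfly_4(int16_t i0, int16_t i1, int16_t i2, int16_t i3,
                            int16_t *o0, int16_t *o1,
                            int16_t *o2, int16_t *o3) {
      *o0 = i0 + i3;
      *o1 = i1 + i2;
      *o2 = i1 - i2;
      *o3 = i0 - i3;
    }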
/external/libvpx/libvpx/vp8/common/mips/msa/
D | vp8_macros_msa.h |
  256  #define SW4(in0, in1, in2, in3, pdst, stride) \  argument
  260  SW(in2, (pdst) + 2 * stride); \
  271  #define SD4(in0, in1, in2, in3, pdst, stride) \  argument
  275  SD(in2, (pdst) + 2 * stride); \
  370  #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \  argument
  373  ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
  378  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
  381  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
  568  #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \  argument
  571  out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
  [all …]
D | idct_msa.c |
  18  #define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \  argument
  22  TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \
  45  #define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \  argument
  52  a1_m = in0 + in2; \
  53  b1_m = in0 - in2; \
  67  #define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \  argument
  75  a1_m = in0 + in2; \
  76  b1_m = in0 - in2; \
  91  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;  in idct4x4_addblk_msa() local
  100  UNPCK_SH_SW(input1, in2, in3);  in idct4x4_addblk_msa()
  [all …]
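The a1_m/b1_m lines are the even half of VP8's 4-point inverse DCT; the odd half scales by fixed-point approximations of sqrt(2)*sin(pi/8) and sqrt(2)*cos(pi/8) - 1, which is why 16-bit (_H) and 32-bit (_W) variants exist. A scalar sketch of the whole 1-D pass; the Q16 constants are quoted from memory of the VP8 reference code, so verify before relying on them:

    #include <stdint.h>

    /* Q16 constants believed to match VP8's reference IDCT:
     * kSinPi8Sqrt2 ~ sqrt(2)*sin(pi/8), kCosPi8Sqrt2Minus1 ~
     * sqrt(2)*cos(pi/8) - 1 (values from memory, unverified). */
    enum { kSinPi8Sqrt2 = 35468, kCosPi8Sqrt2Minus1 = 20091 };

    static void vp8_idct_1d(const int32_t in[4], int32_t out[4]) {
      int32_t a1 = in[0] + in[2];  /* even sum,  as a1_m above */
      int32_t b1 = in[0] - in[2];  /* even diff, as b1_m above */
      int32_t c1 = ((in[1] * kSinPi8Sqrt2) >> 16) -
                   (in[3] + ((in[3] * kCosPi8Sqrt2Minus1) >> 16));
      int32_t d1 = (in[1] + ((in[1] * kCosPi8Sqrt2Minus1) >> 16)) +
                   ((in[3] * kSinPi8Sqrt2) >> 16);
      out[0] = a1 + d1;
      out[1] = b1 + c1;
      out[2] = b1 - c1;
      out[3] = a1 - d1;
    }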
/external/libvpx/libvpx/vpx_dsp/x86/
D | fwd_txfm_sse2.c |
  49  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));  in vpx_fdct8x8_1_sse2() local
  54  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct8x8_1_sse2()
  58  in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));  in vpx_fdct8x8_1_sse2()
  64  in2 = _mm_add_epi16(in2, in3);  in vpx_fdct8x8_1_sse2()
  68  sum = _mm_add_epi16(sum, in2);  in vpx_fdct8x8_1_sse2()
  88  __m128i in0, in1, in2, in3;  in vpx_fdct16x16_1_sse2() local
  97  in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));  in vpx_fdct16x16_1_sse2()
  101  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct16x16_1_sse2()
  106  in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));  in vpx_fdct16x16_1_sse2()
  111  u1 = _mm_add_epi16(in2, in3);  in vpx_fdct16x16_1_sse2()
  [all …]
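The _1 suffix marks DC-only forward transforms: rather than a full FDCT, they just accumulate every sample of the block, which is all that is needed when only the DC coefficient will be kept. Scalar equivalent, modulo the library's final scaling (sketch):

    #include <stdint.h>

    /* Sum all samples of an n x n block, the only work the _1 fdct
     * variants do before writing the DC coefficient (sketch). */
    static int32_t fdct_dc_sum(const int16_t *input, int stride, int n) {
      int32_t sum = 0;
      for (int r = 0; r < n; ++r)
        for (int c = 0; c < n; ++c)
          sum += input[r * stride + c];
      return sum;
    }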
/external/llvm/test/CodeGen/AMDGPU/
D | fcmp64.ll |
  7   double addrspace(1)* %in2) {
  9   %r1 = load double, double addrspace(1)* %in2
  19  double addrspace(1)* %in2) {
  21  %r1 = load double, double addrspace(1)* %in2
  31  double addrspace(1)* %in2) {
  33  %r1 = load double, double addrspace(1)* %in2
  43  double addrspace(1)* %in2) {
  45  %r1 = load double, double addrspace(1)* %in2
  55  double addrspace(1)* %in2) {
  57  %r1 = load double, double addrspace(1)* %in2
  [all …]
D | frem.ll |
  17  float addrspace(1)* %in2) #0 {
  18  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  36  float addrspace(1)* %in2) #1 {
  37  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  57  double addrspace(1)* %in2) #0 {
  59  %r1 = load double, double addrspace(1)* %in2, align 8
  73  double addrspace(1)* %in2) #1 {
  75  %r1 = load double, double addrspace(1)* %in2, align 8
  82  <2 x float> addrspace(1)* %in2) #0 {
  83  %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
  [all …]
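These tests exercise frem lowering; AMDGPU has no single remainder instruction, so the operation is expanded into a divide, truncate, and multiply-subtract sequence. In C terms (sketch, ignoring rounding-mode and denormal corner cases):

    #include <math.h>

    /* Typical frem expansion: x - trunc(x / y) * y, so the result keeps
     * the sign of x, matching LLVM's frem semantics (sketch). */
    static double frem_expand(double x, double y) {
      return x - trunc(x / y) * y;
    }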
D | fmul64.ll |
  7   double addrspace(1)* %in2) {
  9   %r1 = load double, double addrspace(1)* %in2
  19  <2 x double> addrspace(1)* %in2) {
  21  %r1 = load <2 x double>, <2 x double> addrspace(1)* %in2
  33  <4 x double> addrspace(1)* %in2) {
  35  %r1 = load <4 x double>, <4 x double> addrspace(1)* %in2
D | image-resource-id.ll |
  71   %opencl.image2d_t addrspace(1)* %in2, ; read_only
  86   %opencl.image2d_t addrspace(1)* %in2, ; read_only
  90   %opencl.image2d_t addrspace(1)* %in2) #0
  101  %opencl.image3d_t addrspace(1)* %in2, ; read_only
  116  %opencl.image3d_t addrspace(1)* %in2, ; read_only
  120  %opencl.image3d_t addrspace(1)* %in2) #0
  133  %opencl.image2d_t addrspace(1)* %in2, ; write_only
  148  %opencl.image2d_t addrspace(1)* %in2, ; write_only
  152  %opencl.image2d_t addrspace(1)* %in2) #0
  163  %opencl.image3d_t addrspace(1)* %in2, ; write_only
  [all …]
/external/webrtc/webrtc/common_audio/signal_processing/
D | resample.c |
  312  static void WebRtcSpl_DotProdIntToInt(const int32_t* in1, const int32_t* in2,  in WebRtcSpl_DotProdIntToInt() argument
  322  tmp2 += coef * in2[-0];  in WebRtcSpl_DotProdIntToInt()
  326  tmp2 += coef * in2[-1];  in WebRtcSpl_DotProdIntToInt()
  330  tmp2 += coef * in2[-2];  in WebRtcSpl_DotProdIntToInt()
  334  tmp2 += coef * in2[-3];  in WebRtcSpl_DotProdIntToInt()
  338  tmp2 += coef * in2[-4];  in WebRtcSpl_DotProdIntToInt()
  342  tmp2 += coef * in2[-5];  in WebRtcSpl_DotProdIntToInt()
  346  tmp2 += coef * in2[-6];  in WebRtcSpl_DotProdIntToInt()
  350  tmp2 += coef * in2[-7];  in WebRtcSpl_DotProdIntToInt()
  354  *out2 = tmp2 + coef * in2[-8];  in WebRtcSpl_DotProdIntToInt()
  [all …]
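The unrolled body computes two 9-tap dot products that share each coefficient: in1 is walked forward while in2 is walked backward (the in2[-0] through in2[-8] indices). Rolled back into a loop (sketch; the real routine also folds in a rounding constant and a final shift):

    #include <stdint.h>

    /* Two 9-tap dot products sharing one coefficient stream; in2 is
     * indexed backward exactly as in the unrolled code above (sketch). */
    static void dot_prod_pair(const int32_t *in1, const int32_t *in2,
                              const int32_t *coef,
                              int32_t *out1, int32_t *out2) {
      int32_t tmp1 = 0, tmp2 = 0;
      for (int i = 0; i < 9; ++i) {
        tmp1 += coef[i] * in1[i];
        tmp2 += coef[i] * in2[-i];
      }
      *out1 = tmp1;
      *out2 = tmp2;
    }

The WebRtcSpl_ResampDotProduct entry below shows the same unrolled shape.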
D | resample_fractional.c |
  147  static void WebRtcSpl_ResampDotProduct(const int32_t *in1, const int32_t *in2,  in WebRtcSpl_ResampDotProduct() argument
  157  tmp2 += coef * in2[-0];  in WebRtcSpl_ResampDotProduct()
  161  tmp2 += coef * in2[-1];  in WebRtcSpl_ResampDotProduct()
  165  tmp2 += coef * in2[-2];  in WebRtcSpl_ResampDotProduct()
  169  tmp2 += coef * in2[-3];  in WebRtcSpl_ResampDotProduct()
  173  tmp2 += coef * in2[-4];  in WebRtcSpl_ResampDotProduct()
  177  tmp2 += coef * in2[-5];  in WebRtcSpl_ResampDotProduct()
  181  tmp2 += coef * in2[-6];  in WebRtcSpl_ResampDotProduct()
  185  tmp2 += coef * in2[-7];  in WebRtcSpl_ResampDotProduct()
  189  *out2 = tmp2 + coef * in2[-8];  in WebRtcSpl_ResampDotProduct()
/external/opencv3/modules/cudalegacy/src/cuda/
D | NCVAlg.hpp |
  73   static __device__ __inline__ void reduce(volatile T &in1out, const volatile T &in2)  in reduce()
  75   in1out += in2;  in reduce()
  88   static __device__ __inline__ void reduce(volatile T &in1out, const volatile T &in2)  in reduce()
  90   in1out = in1out > in2 ? in2 : in1out;  in reduce()
  103  static __device__ __inline__ void reduce(volatile T &in1out, const volatile T &in2)  in reduce()
  105  in1out = in1out > in2 ? in1out : in2;  in reduce()
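The three template specializations differ only in the binary step they apply: add, min (via the > ?: form), or max. The same selection in plain C uses a function pointer in place of the functor (sketch; the CUDA original applies the step across a shared-memory tree rather than sequentially):

    #include <stddef.h>

    typedef float (*reduce_op)(float, float);

    static float op_add(float a, float b) { return a + b; }
    static float op_min(float a, float b) { return a > b ? b : a; }
    static float op_max(float a, float b) { return a > b ? a : b; }

    /* Sequential fold over n elements with the chosen step (sketch). */
    static float reduce(const float *v, size_t n, reduce_op op) {
      float acc = v[0];
      for (size_t i = 1; i < n; ++i) acc = op(acc, v[i]);
      return acc;
    }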
/external/caliper/caliper/src/main/java/com/google/caliper/util/
D | LinearTranslation.java |
  36  public LinearTranslation(double in1, double out1, double in2, double out2) {  in LinearTranslation() argument
  37  if (Math.abs(in1 - in2) < EQUALITY_TOLERANCE) {  in LinearTranslation()
  40  double divisor = in1 - in2;  in LinearTranslation()
  42  this.b = (in1 * out2 - in2 * out1) / divisor;  in LinearTranslation()
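The constructor fits the line y = a*x + b through (in1, out1) and (in2, out2), rejecting nearly equal inputs because the divisor would vanish. The same derivation in C (the slope formula is inferred from the visible intercept line, and the tolerance is passed in since the class's constant isn't shown):

    #include <assert.h>
    #include <math.h>

    typedef struct { double a, b; } linear_translation;  /* y = a*x + b */

    /* Solve a and b from two sample points; tol guards the degenerate
     * in1 == in2 case, as the Java constructor does (sketch). */
    static linear_translation make_translation(double in1, double out1,
                                               double in2, double out2,
                                               double tol) {
      double divisor = in1 - in2;
      assert(fabs(divisor) >= tol);
      linear_translation t;
      t.a = (out1 - out2) / divisor;              /* slope (inferred)    */
      t.b = (in1 * out2 - in2 * out1) / divisor;  /* intercept, as above */
      return t;
    }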
/external/boringssl/src/crypto/aes/asm/
D | aesv8-armx.pl |
  506  my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
  515  vorr $in2,$dat2,$dat2
  522  vorr $in2,$dat2,$dat2
  562  vorr $ivec,$in2,$in2
  584  vld1.8 {$in2},[$inp],#16
  599  vorr $dat2,$in2,$in2
  648  vorr $ivec,$in2,$in2
  655  vorr $ivec,$in2,$in2
  682  my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
  771  vld1.8 {$in2},[$inp],#16
  [all …]
/external/vulkan-validation-layers/libs/glm/detail/
D | intrinsic_matrix.inl |
  39  __m128 const in2[4],
  43  out[0] = _mm_mul_ps(in1[0], in2[0]);
  44  out[1] = _mm_mul_ps(in1[1], in2[1]);
  45  out[2] = _mm_mul_ps(in1[2], in2[2]);
  46  out[3] = _mm_mul_ps(in1[3], in2[3]);
  49  GLM_FUNC_QUALIFIER void sse_add_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
  52  out[0] = _mm_add_ps(in1[0], in2[0]);
  53  out[1] = _mm_add_ps(in1[1], in2[1]);
  54  out[2] = _mm_add_ps(in1[2], in2[2]);
  55  out[3] = _mm_add_ps(in1[3], in2[3]);
  [all …]
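Each __m128 holds one column of a 4x4 float matrix, so these helpers cost one SSE instruction per column; note the mul shown is element-wise (GLSL's matrixCompMult), not a matrix product. A minimal standalone version (sketch):

    #include <xmmintrin.h>

    /* Column-wise 4x4 helpers: _mm_add_ps and _mm_mul_ps each operate
     * on four packed floats, i.e. one whole column per call. */
    static void mat4_add_sse(const __m128 a[4], const __m128 b[4],
                             __m128 out[4]) {
      for (int i = 0; i < 4; ++i) out[i] = _mm_add_ps(a[i], b[i]);
    }

    static void mat4_comp_mul_sse(const __m128 a[4], const __m128 b[4],
                                  __m128 out[4]) {
      for (int i = 0; i < 4; ++i) out[i] = _mm_mul_ps(a[i], b[i]);
    }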