/external/libvpx/libvpx/vp9/encoder/mips/msa/

vp9_fdct8x8_msa.c
    18  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_fht8x8_msa() local
    20  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    22  SLLI_4V(in4, in5, in6, in7, 2);  in vp9_fht8x8_msa()
    26  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    28  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_fht8x8_msa()
    29  in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    30  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    34  VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,  in vp9_fht8x8_msa()
    36  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_fht8x8_msa()
    37  in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
    [all …]
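
The call pattern in vp9_fht8x8_msa() above (a 1-D transform via VP9_FDCT8 or VP9_ADST8, a TRANSPOSE8x8_SH_SH, then the 1-D transform again) is the usual separable 2-D transform structure. A minimal scalar sketch of that structure only, where transform_1d is a hypothetical stand-in for the MSA transform macros and the real code's load, rounding and scaling steps are omitted:

    #include <stdint.h>

    /* Hypothetical 1-D 8-point transform; stands in for VP9_FDCT8 / VP9_ADST8. */
    typedef void (*transform_1d_fn)(int16_t row[8]);

    /* Two-pass flow mirrored by the MSA code above: one 1-D pass, an
     * in-place transpose, then the second 1-D pass over the other axis. */
    static void transform_2d_8x8(int16_t block[8][8], transform_1d_fn transform_1d) {
      for (int r = 0; r < 8; ++r) transform_1d(block[r]);  /* first pass */
      for (int r = 0; r < 8; ++r) {                        /* transpose */
        for (int c = r + 1; c < 8; ++c) {
          const int16_t t = block[r][c];
          block[r][c] = block[c][r];
          block[c][r] = t;
        }
      }
      for (int r = 0; r < 8; ++r) transform_1d(block[r]);  /* second pass */
    }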

vp9_fdct4x4_msa.c
    18  v8i16 in0, in1, in2, in3, in4;  in vp9_fwht4x4_msa() local
    24  in4 = (in0 - in3) >> 1;  in vp9_fwht4x4_msa()
    25  SUB2(in4, in1, in4, in2, in1, in2);  in vp9_fwht4x4_msa()
    33  in4 = (in0 - in1) >> 1;  in vp9_fwht4x4_msa()
    34  SUB2(in4, in2, in4, in3, in2, in3);  in vp9_fwht4x4_msa()
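
The half-difference lines above (in4 = (in0 - in3) >> 1 followed by two SUB2 calls) are the core butterfly of the 4-point Walsh-Hadamard transform that vp9_fwht4x4_msa vectorizes. A hedged scalar sketch of one 1-D pass of that butterfly follows; the real transform runs it over rows and then columns, applies a final scaling that is omitted here, and its output ordering may differ.

    #include <stdint.h>

    /* One 1-D pass of the 4-point Walsh-Hadamard butterfly suggested by the
     * snippet above. 'e' plays the role of the half-difference pivot (in4). */
    static void fwht4_1d_sketch(const int16_t in[4], int16_t out[4]) {
      int a = in[0], b = in[1], c = in[2], d = in[3];
      a += b;                      /* sum of the first pair         */
      d -= c;                      /* difference of the second pair */
      const int e = (a - d) >> 1;  /* half-difference pivot         */
      b = e - b;
      c = e - c;
      a -= c;
      d += b;
      out[0] = (int16_t)a;
      out[1] = (int16_t)c;
      out[2] = (int16_t)d;
      out[3] = (int16_t)b;
    }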

vp9_fdct_msa.h
    18  #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \  argument
    36  ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
    38  cnst2_m, cnst3_m, in7, in0, in4, in3); \
    62  ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \

/external/libvpx/libvpx/vp9/common/mips/msa/

vp9_idct8x8_msa.c
    18  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vp9_iht8x8_64_add_msa() local
    21  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    23  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    24  in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    29  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    30  in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    32  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,  in vp9_iht8x8_64_add_msa()
    33  in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    34  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vp9_iht8x8_64_add_msa()
    35  in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()
    [all …]

/external/libvpx/libvpx/vpx_dsp/mips/

fwd_txfm_msa.c
    15  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_fdct8x8_1_msa() local
    18  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_fdct8x8_1_msa()
    19  ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct8x8_1_msa()
    20  ADD2(in0, in2, in4, in6, in0, in4);  in vpx_fdct8x8_1_msa()
    22  vec_w += __msa_hadd_s_w(in4, in4);  in vpx_fdct8x8_1_msa()
    31  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x16_1d_column() local
    44  LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,  in fdct8x16_1d_column()
    47  SLLI_4V(in4, in5, in6, in7, 2);  in fdct8x16_1d_column()
    51  ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);  in fdct8x16_1d_column()
    56  SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8);  in fdct8x16_1d_column()
    [all …]
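
vpx_fdct8x8_1_msa above computes only the DC term of the forward transform (that is what the _1 variants do): the ADD4 / ADD2 / __msa_hadd_s_w sequence is a tree reduction of all 64 input samples into one sum. A scalar sketch of that reduction; the scaling applied to the sum before it is stored follows the library's C reference and is not reproduced here.

    #include <stdint.h>

    /* Sum of all 64 residual samples of an 8x8 block; this is the value the
     * vector ADD4/ADD2/hadd tree above accumulates lane-wise. */
    static int32_t fdct8x8_dc_sum_sketch(const int16_t *input, int stride) {
      int32_t sum = 0;
      for (int r = 0; r < 8; ++r)
        for (int c = 0; c < 8; ++c)
          sum += input[r * stride + c];
      return sum;  /* the real function writes the (scaled) sum to output[0] */
    }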

idct8x8_msa.c
    15  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in vpx_idct8x8_64_add_msa() local
    18  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    21  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    22  in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    24  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    25  in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    27  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    28  in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    30  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,  in vpx_idct8x8_64_add_msa()
    31  in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
    [all …]

fwd_dct32x32_msa.c
    16  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in fdct8x32_1d_column_load_butterfly() local
    23  LD_SH4(input + (28 * src_stride), src_stride, in4, in5, in6, in7);  in fdct8x32_1d_column_load_butterfly()
    27  SLLI_4V(in4, in5, in6, in7, 2);  in fdct8x32_1d_column_load_butterfly()
    30  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,  in fdct8x32_1d_column_load_butterfly()
    31  step3, in4, in5, in6, in7);  in fdct8x32_1d_column_load_butterfly()
    35  ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8);  in fdct8x32_1d_column_load_butterfly()
    41  LD_SH4(input + (20 * src_stride), src_stride, in4, in5, in6, in7);  in fdct8x32_1d_column_load_butterfly()
    45  SLLI_4V(in4, in5, in6, in7, 2);  in fdct8x32_1d_column_load_butterfly()
    48  BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2,  in fdct8x32_1d_column_load_butterfly()
    49  step3, in4, in5, in6, in7);  in fdct8x32_1d_column_load_butterfly()
    [all …]
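
The BUTTERFLY_8 calls in the load stage above combine mirrored input pairs; by the usual convention, sums land in the first half of the outputs and differences in the second half, which is the first stage of a long DCT. A hedged sketch of that convention (the macro's exact operand ordering lives in macros_msa.h and may differ):

    #include <stdint.h>

    /* Generic 8-input butterfly: out[i] = in[i] + in[7-i] for the first four
     * outputs, out[7-i] = in[i] - in[7-i] for the last four. */
    static void butterfly8_sketch(const int16_t in[8], int16_t out[8]) {
      for (int i = 0; i < 4; ++i) {
        out[i] = (int16_t)(in[i] + in[7 - i]);
        out[7 - i] = (int16_t)(in[i] - in[7 - i]);
      }
    }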

macros_msa.h
    331  #define ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    334  ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
    488  #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    492  AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
    1005  #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    1009  ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    1062  #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    1066  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    1073  #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \  argument
    1077  ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
    [all …]
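
Every wide macro listed above is defined by delegating to two half-width macros, with the second call offset appropriately; that composition idiom is what makes the 8-element variants one-liners. A hypothetical, simplified illustration of the idiom (the names mirror macros_msa.h, but RTYPE handling and the real vector store are replaced by a plain memcpy stand-in):

    #include <stdint.h>
    #include <string.h>

    /* Stand-in for the single-vector store primitive (8 x int16_t lanes). */
    #define ST_V(vec, pdst) memcpy((pdst), (vec), 8 * sizeof(int16_t))

    /* Each wider store is two narrower stores, the second offset by half
     * the rows, matching the ST_V8 -> ST_V4 delegation above. */
    #define ST_V2(in0, in1, pdst, stride) \
      {                                   \
        ST_V(in0, (pdst));                \
        ST_V(in1, (pdst) + (stride));     \
      }

    #define ST_V4(in0, in1, in2, in3, pdst, stride)     \
      {                                                 \
        ST_V2(in0, in1, (pdst), stride);                \
        ST_V2(in2, in3, (pdst) + 2 * (stride), stride); \
      }

    #define ST_V8(in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \
      {                                                                 \
        ST_V4(in0, in1, in2, in3, (pdst), stride);                      \
        ST_V4(in4, in5, in6, in7, (pdst) + 4 * (stride), stride);       \
      }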

inv_txfm_msa.h
    18  #define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \  argument
    36  ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
    38  cnst2_m, cnst3_m, in7, in0, in4, in3); \
    62  ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
    214  #define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    241  VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \
    242  BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \
    247  #define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    268  ILVRL_H2_SH(in5, in4, in_s1, in_s0); \
    298  BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3); \
    [all …]

/external/tensorflow/tensorflow/core/kernels/

aggregate_ops_cpu.h
    58  typename TTypes<T>::ConstFlat in4) {
    59  Add4EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4);
    68  typename TTypes<T>::ConstFlat in4,
    70  Add5EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);
    79  typename TTypes<T>::ConstFlat in4,
    82  Add6EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);
    91  typename TTypes<T>::ConstFlat in4,
    95  Add7EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
    105  typename TTypes<T>::ConstFlat in3, typename TTypes<T>::ConstFlat in4,
    108  Add8EigenImpl<CPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,
    [all …]

aggregate_ops_gpu.cu.cc
    57  typename TTypes<T>::ConstFlat in4) {  in operator ()()
    58  Add4EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4);  in operator ()()
    68  typename TTypes<T>::ConstFlat in4,  in operator ()()
    70  Add5EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5);  in operator ()()
    80  typename TTypes<T>::ConstFlat in4,  in operator ()()
    83  Add6EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6);  in operator ()()
    93  typename TTypes<T>::ConstFlat in4,  in operator ()()
    97  Add7EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,  in operator ()()
    107  typename TTypes<T>::ConstFlat in3, typename TTypes<T>::ConstFlat in4,  in operator ()()
    110  Add8EigenImpl<GPUDevice, T>::Compute(d, out, in1, in2, in3, in4, in5, in6,  in operator ()()
    [all …]

aggregate_ops.h
    67  typename TTypes<T>::ConstFlat in4);
    76  typename TTypes<T>::ConstFlat in4) {  in Compute()
    77  out.device(d) = in1 + in2 + in3 + in4;  in Compute()
    87  typename TTypes<T>::ConstFlat in4,
    97  typename TTypes<T>::ConstFlat in4,  in Compute()
    99  out.device(d) = in1 + in2 + in3 + in4 + in5;  in Compute()
    109  typename TTypes<T>::ConstFlat in4,
    120  typename TTypes<T>::ConstFlat in4,  in Compute()
    123  out.device(d) = in1 + in2 + in3 + in4 + in5 + in6;  in Compute()
    133  typename TTypes<T>::ConstFlat in4,
    [all …]
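
The Compute bodies above all reduce to a single Eigen expression assigned through .device(), which is what lets the same functor run against either the CPU or the GPU device in the neighboring files. A minimal standalone sketch of that pattern using Eigen's Tensor module directly (TensorFlow's TTypes<T>::ConstFlat is, roughly, a flat Eigen tensor map; the TensorFlow wrapper types are left out here):

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 1> in1(8), in2(8), in3(8), in4(8), out(8);
      in1.setConstant(1.0f);
      in2.setConstant(2.0f);
      in3.setConstant(3.0f);
      in4.setConstant(4.0f);

      // The functor receives a device and writes the fused sum through it,
      // mirroring `out.device(d) = in1 + in2 + in3 + in4;` in the listing.
      Eigen::DefaultDevice device;
      out.device(device) = in1 + in2 + in3 + in4;

      return out(0) == 10.0f ? 0 : 1;  // each element is 1+2+3+4
    }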

/external/libaom/libaom/av1/encoder/mips/msa/

fdct4x4_msa.c
    18  v8i16 in0, in1, in2, in3, in4;  in av1_fwht4x4_msa() local
    24  in4 = (in0 - in3) >> 1;  in av1_fwht4x4_msa()
    25  SUB2(in4, in1, in4, in2, in1, in2);  in av1_fwht4x4_msa()
    33  in4 = (in0 - in1) >> 1;  in av1_fwht4x4_msa()
    34  SUB2(in4, in2, in4, in3, in2, in3);  in av1_fwht4x4_msa()

/external/deqp-deps/glslang/Test/

spv.swizzleInversion.frag
    3  in vec4 in4;
    8  vec3 v43 = interpolateAtCentroid(in4.wzx);
    9  vec2 v42 = interpolateAtSample(in4.zx, 1);
    10  vec4 v44 = interpolateAtOffset(in4.zyxw, vec2(2.0));
    11  float v41 = interpolateAtOffset(in4.y, vec2(2.0));
    15  float v31 = interpolateAtOffset(in4.y, vec2(2.0));

450.frag
    6  in vec4 in4;
    12  vec4 v4 = fwidth(in4);
    13  v4 = dFdyFine(in4);
    16  v4 = fwidthCoarse(in4) + fwidthFine(in4);

400.frag
    69  in vec4 in4;
    75  vec4 v4 = fwidthCoarse(in4) + fwidthFine(in4); // ERROR
    84  vec4 v4 = fwidthCoarse(in4) + fwidthFine(in4);

/external/deqp-deps/glslang/Test/baseResults/

spv.swizzleInversion.frag.out
    15  Name 12 "in4"
    30  12(in4): 11(ptr) Variable Input
    53  13: 10(fvec4) ExtInst 1(GLSL.std.450) 76(InterpolateAtCentroid) 12(in4)
    56  20: 10(fvec4) ExtInst 1(GLSL.std.450) 77(InterpolateAtSample) 12(in4) 19
    59  26: 10(fvec4) ExtInst 1(GLSL.std.450) 78(InterpolateAtOffset) 12(in4) 25
    62  33: 32(ptr) AccessChain 12(in4) 31
    71  44: 32(ptr) AccessChain 12(in4) 31

450.frag.out
    26  0:12 'in4' ( smooth in 4-component vector of float)
    30  0:13 'in4' ( smooth in 4-component vector of float)
    50  0:16 'in4' ( smooth in 4-component vector of float)
    52  0:16 'in4' ( smooth in 4-component vector of float)
    160  0:? 'in4' ( smooth in 4-component vector of float)
    193  0:12 'in4' ( smooth in 4-component vector of float)
    197  0:13 'in4' ( smooth in 4-component vector of float)
    217  0:16 'in4' ( smooth in 4-component vector of float)
    219  0:16 'in4' ( smooth in 4-component vector of float)
    276  0:? 'in4' ( smooth in 4-component vector of float)

/external/boringssl/src/crypto/fipsmodule/aes/asm/

aesp8-ppc.pl
    672  my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
    676  my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
    780  lvx_u $in4,$x40,$inp
    787  le?vperm $in4,$in4,$in4,$inpperm
    794  vxor $out4,$in4,$rndkey0
    901  vxor $in4,$in4,v31
    918  vncipherlast $out5,$out5,$in4
    920  lvx_u $in4,$x40,$inp
    925  le?vperm $in4,$in4,$in4,$inpperm
    949  vxor $out4,$in4,$rndkey0
    [all …]

/external/libaom/libaom/aom_dsp/mips/

macros_msa.h
    423  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    426  ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
    449  #define ST_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    452  ST_H4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
    616  #define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    620  AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \
    1116  #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    1120  ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    1173  #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    1177  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    [all …]

/external/libaom/libaom/aom_dsp/x86/

fwd_txfm_impl_sse2.h
    56  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));  in FDCT8x8_2D() local
    65  in4 = _mm_slli_epi16(in4, 2);  in FDCT8x8_2D()
    81  const __m128i q3 = ADD_EPI16(in3, in4);  in FDCT8x8_2D()
    82  const __m128i q4 = SUB_EPI16(in3, in4);  in FDCT8x8_2D()
    288  in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);  in FDCT8x8_2D()
    311  const __m128i sign_in4 = _mm_srai_epi16(in4, 15);  in FDCT8x8_2D()
    319  in4 = _mm_sub_epi16(in4, sign_in4);  in FDCT8x8_2D()
    327  in4 = _mm_srai_epi16(in4, 1);  in FDCT8x8_2D()
    336  store_output(&in4, (output + 4 * 8));  in FDCT8x8_2D()
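
The sign_in4 / sub / srai trio near the end of FDCT8x8_2D above is the standard divide-by-two-with-round-toward-zero idiom: shifting right by 15 yields 0 for non-negative lanes and -1 for negative lanes, so subtracting it adds 1 to the negative values before the arithmetic shift. A scalar sketch of what those three intrinsics compute per 16-bit lane:

    #include <stdint.h>

    /* Per-lane equivalent of:
     *   sign = _mm_srai_epi16(x, 15);   0 if x >= 0, -1 (all ones) if x < 0
     *   x    = _mm_sub_epi16(x, sign);  adds 1 to negative lanes
     *   x    = _mm_srai_epi16(x, 1);    arithmetic shift right by one
     * i.e. x / 2 rounded toward zero rather than toward negative infinity. */
    static int16_t div2_round_toward_zero(int16_t x) {
      const int16_t sign = (int16_t)(x >> 15);  /* relies on arithmetic shift */
      return (int16_t)((x - sign) >> 1);
    }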

/external/libvpx/libvpx/vp9/encoder/x86/

vp9_dct_ssse3.c
    50  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));  in vp9_fdct8x8_quant_ssse3() local
    65  in4 = _mm_slli_epi16(in4, 2);  in vp9_fdct8x8_quant_ssse3()
    74  in[4] = &in4;  in vp9_fdct8x8_quant_ssse3()
    90  const __m128i q3 = _mm_add_epi16(in3, in4);  in vp9_fdct8x8_quant_ssse3()
    91  const __m128i q4 = _mm_sub_epi16(in3, in4);  in vp9_fdct8x8_quant_ssse3()
    240  in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);  in vp9_fdct8x8_quant_ssse3()
    263  const __m128i sign_in4 = _mm_srai_epi16(in4, 15);  in vp9_fdct8x8_quant_ssse3()
    271  in4 = _mm_sub_epi16(in4, sign_in4);  in vp9_fdct8x8_quant_ssse3()
    279  in4 = _mm_srai_epi16(in4, 1);  in vp9_fdct8x8_quant_ssse3()

/external/libvpx/libvpx/vp8/common/mips/msa/

vp8_macros_msa.h
    360  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \  argument
    363  ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \
    558  #define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \  argument
    562  out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4); \
    953  #define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    957  ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    1008  #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    1012  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    1035  #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \  argument
    1039  ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
    [all …]

/external/webp/src/dsp/

msa_macro.h
    310  #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    313  ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
    873  #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    876  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    899  #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    902  ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
    923  #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    926  ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
    984  #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \  argument
    987  PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
    [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/

long-chains.ll
    4  define i8 @longchain(i8 %in1, i8 %in2, i8 %in3, i8 %in4, i8 %in5, i8 %in6, i8 %in7, i8 %in8, i8 %in…
    7  %tmp3 = add i8 %tmp2, %in4
    9  %tmp5 = add i8 %tmp4, %in4