/external/libvpx/libvpx/vp9/encoder/mips/msa/

vp9_fdct16x16_msa.c
  168  ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);  in fadst16_transpose_postproc_msa()
  178  ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);  in fadst16_transpose_postproc_msa()
  190  ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);  in fadst16_transpose_postproc_msa()
  200  ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);  in fadst16_transpose_postproc_msa()
  348  ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);  in fadst16_transpose_msa()
  349  ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);  in fadst16_transpose_msa()
  360  ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);  in fadst16_transpose_msa()
  361  ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);  in fadst16_transpose_msa()
  391  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);  in postproc_fdct16x8_1d_row()
  400  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);  in postproc_fdct16x8_1d_row()
  [all …]

vp9_fdct8x8_msa.c
  65   ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);  in vp9_fht8x8_msa()
/external/libvpx/libvpx/vpx_dsp/mips/

fwd_txfm_msa.c
  39   ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);  in fdct8x16_1d_column()
  155  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, input, 16);  in fdct16x8_1d_row()
  163  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, output, 16);  in fdct16x8_1d_row()
  166  ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16);  in fdct16x8_1d_row()
  215  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);  in vpx_fdct8x8_msa()

fwd_dct32x32_msa.c
  269  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8);  in fdct8x32_1d_row_load_butterfly()
  270  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8);  in fdct8x32_1d_row_load_butterfly()
  283  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7,  in fdct8x32_1d_row_load_butterfly()
  285  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 16 * 8), 8);  in fdct8x32_1d_row_load_butterfly()
  306  ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8);  in fdct8x32_1d_row_even_4x()
  307  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8);  in fdct8x32_1d_row_even_4x()
  626  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 32);  in fdct8x32_1d_row_transpose_store()
  640  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,  in fdct8x32_1d_row_transpose_store()
  644  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32);  in fdct8x32_1d_row_transpose_store()
  658  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1,  in fdct8x32_1d_row_transpose_store()

idct16x16_msa.c
  98   ST_SH8(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, output, 16);  in vpx_idct16_1d_rows_msa()
  103  ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);  in vpx_idct16_1d_rows_msa()
  318  ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);  in vpx_iadst16_1d_rows_msa()
  321  ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);  in vpx_iadst16_1d_rows_msa()

idct32x32_msa.c
  24   ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);  in idct32x8_row_transpose_store()
macros_msa.h
  440  #define ST_SH8(...) ST_H8(v8i16, __VA_ARGS__)  (macro definition)
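For reference, a minimal plain-C sketch of the store pattern implied by the definition above and the call sites listed here: ST_SH8 takes eight v8i16 vectors (eight int16_t lanes each), a destination pointer, and a stride, and stores the vectors one after another, stepping the destination by `stride` int16_t elements per vector. The names v8i16_model and st_sh8_model, and the reading of the stride as an element count, are illustrative assumptions for this sketch, not the MSA intrinsic implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the MSA v8i16 type: eight signed 16-bit lanes. */
typedef struct { int16_t val[8]; } v8i16_model;

/* Sketch of ST_SH8(in0..in7, pdst, stride): store eight halfword vectors,
 * advancing the destination by `stride` int16_t elements between stores
 * (assumption: stride counts int16_t elements, so stride 8 is contiguous). */
static void st_sh8_model(const v8i16_model in[8], int16_t *pdst,
                         ptrdiff_t stride) {
  for (int i = 0; i < 8; ++i) {
    memcpy(pdst + i * stride, in[i].val, sizeof(in[i].val));
  }
}

int main(void) {
  /* Mirrors the fdct16x8_1d_row() call sites above: a 16-column int16_t
   * buffer, where stride 16 puts each vector at the start of a new row. */
  int16_t output[8 * 16] = { 0 };
  v8i16_model rows[8];
  for (int r = 0; r < 8; ++r) {
    for (int c = 0; c < 8; ++c) rows[r].val[c] = (int16_t)(r * 8 + c);
  }
  st_sh8_model(rows, output, 16);
  printf("%d %d\n", output[0], output[16]); /* prints 0 8: row r starts at r*16 */
  return 0;
}

Under the same assumption, stride 8 (as in vpx_fdct8x8_msa() and the transpose helpers) lays the eight vectors down back to back as one contiguous 8x8 block of int16_t.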