
Searched refs: LD_UB8 (results 1–17 of 17), sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_4_msa.c:25   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_msa()
loopfilter_4_msa.c:53   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_dual_msa()
loopfilter_4_msa.c:85   LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_msa()
loopfilter_4_msa.c:119  LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in vpx_lpf_vertical_4_dual_msa()
loopfilter_4_msa.c:120  LD_UB8(src - 4 + (8 * pitch), pitch, … in vpx_lpf_vertical_4_dual_msa()
vpx_convolve_copy_msa.c:22   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
vpx_convolve_copy_msa.c:51   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
vpx_convolve_copy_msa.c:108  LD_UB8(src_tmp, src_stride, … in copy_16multx8mult_msa()
vpx_convolve_copy_msa.c:129  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width16_msa()
loopfilter_8_msa.c:29   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_msa()
loopfilter_8_msa.c:103  LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_dual_msa()
loopfilter_8_msa.c:177  LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_8_msa()
loopfilter_8_msa.c:261  LD_UB8(temp_src, pitch, p0, p1, p2, p3, row4, row5, row6, row7); in vpx_lpf_vertical_8_dual_msa()
loopfilter_8_msa.c:263  LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15); in vpx_lpf_vertical_8_dual_msa()
loopfilter_16_msa.c:29   LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_hz_lpf_t4_and_t8_16w()
loopfilter_16_msa.c:95   LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0); in vpx_hz_lpf_t16_16w()
loopfilter_16_msa.c:96   LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7); in vpx_hz_lpf_t16_16w()
loopfilter_16_msa.c:446  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_16_msa()
loopfilter_16_msa.c:657  LD_UB8(input, in_pitch, … in transpose_16x8_to_8x16()
loopfilter_16_msa.c:681  LD_UB8(input, in_pitch, p7, p6, p5, p4, p3, p2, p1, p0); in transpose_8x16_to_16x8()
loopfilter_16_msa.c:682  LD_UB8(input + (8 * in_pitch), in_pitch, q0, q1, q2, q3, q4, q5, q6, q7); in transpose_8x16_to_16x8()
loopfilter_16_msa.c:696  LD_UB8(input, in_pitch, row0, row1, row2, row3, row4, row5, row6, row7); in transpose_16x16()
loopfilter_16_msa.c:698  LD_UB8(input, in_pitch, … in transpose_16x16()
loopfilter_16_msa.c:761  LD_UB8(src - (4 * 16), 16, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_vt_lpf_t4_and_t8_8w()
[all …]
vpx_convolve_avg_msa.c:87  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in avg_width16_msa()
vpx_convolve_avg_msa.c:89  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); in avg_width16_msa()
vpx_convolve8_avg_vert_msa.c:313  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); in common_vt_2t_and_aver_dst_4x8_msa()
vpx_convolve8_avg_vert_msa.c:392  LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_and_aver_dst_8x8mult_msa()
vpx_convolve8_avg_vert_msa.c:394  LD_UB8(dst, dst_stride, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8); in common_vt_2t_and_aver_dst_8x8mult_msa()
vpx_convolve8_avg_horiz_msa.c:74   LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); in common_hz_8t_and_aver_dst_4x8_msa()
vpx_convolve8_avg_horiz_msa.c:363  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); in common_hz_2t_and_aver_dst_4x8_msa()
vpx_convolve8_avg_msa.c:318  LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
vpx_convolve8_vert_msa.c:407  LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_8x8mult_msa()
macros_msa.h:335  #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)  (macro definition)
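
For context, LD_UB8 simply pins the vector type of the generic byte-load macro to v16u8 (sixteen unsigned bytes). The LD_B8 it forwards to is built up from smaller load macros in the same header; the sketch below paraphrases that structure from macros_msa.h and may not match the tree character for character:

    #define LD_B(RTYPE, psrc) *((const RTYPE *)(psrc))

    #define LD_B2(RTYPE, psrc, stride, out0, out1) \
      {                                            \
        out0 = LD_B(RTYPE, (psrc));                \
        out1 = LD_B(RTYPE, (psrc) + stride);       \
      }

    #define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
      {                                                        \
        LD_B2(RTYPE, (psrc), stride, out0, out1);              \
        LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
      }

    /* LD_B8 = two LD_B4 halves: rows 0-3, then rows 4-7. */
    #define LD_B8(RTYPE, psrc, stride,                                     \
                  out0, out1, out2, out3, out4, out5, out6, out7)          \
      {                                                                    \
        LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3);              \
        LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \
      }

The net effect: LD_UB8(ptr, stride, out0, …, out7) reads eight 16-byte vectors starting at ptr, stepping stride bytes per row — one 16-wide, 8-row pixel block — which is the access pattern behind every hit listed above.
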
/external/libvpx/libvpx/vp8/common/mips/msa/
loopfilter_filters_msa.c:286  LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in loop_filter_horizontal_4_dual_msa()
loopfilter_filters_msa.c:321  LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in loop_filter_vertical_4_dual_msa()
loopfilter_filters_msa.c:322  LD_UB8(src - 4 + (8 * pitch), pitch, … in loop_filter_vertical_4_dual_msa()
loopfilter_filters_msa.c:367  LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3); in mbloop_filter_horizontal_edge_y_msa()
loopfilter_filters_msa.c:395  LD_UB8(temp_src, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); in mbloop_filter_horizontal_edge_uv_msa()
loopfilter_filters_msa.c:397  LD_UB8(temp_src, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); in mbloop_filter_horizontal_edge_uv_msa()
loopfilter_filters_msa.c:448  LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in mbloop_filter_vertical_edge_y_msa()
loopfilter_filters_msa.c:450  LD_UB8(temp_src, pitch, … in mbloop_filter_vertical_edge_y_msa()
loopfilter_filters_msa.c:515  LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in mbloop_filter_vertical_edge_uv_msa()
loopfilter_filters_msa.c:516  LD_UB8(src_v - 4, pitch, … in mbloop_filter_vertical_edge_uv_msa()
[all …]
copymem_msa.c:43  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_16x16_msa()
copymem_msa.c:45  LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14, … in copy_16x16_msa()
postproc_msa.c:271  LD_UB8(p_dst, dst_stride, … in postproc_down_across_chroma_msa()
postproc_msa.c:423  LD_UB8(p_dst, dst_stride, … in postproc_down_across_luma_msa()
postproc_msa.c:425  LD_UB8(p_dst + 8 * dst_stride, dst_stride, … in postproc_down_across_luma_msa()
bilinear_filter_msa.c:380  LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_8x8mult_msa()
vp8_macros_msa.h:324  #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)  (macro definition)
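
The vp8 copy of the macro is identical to the vpx_dsp one. As a usage illustration (a hypothetical helper, not code from the tree), this is the shape of the loop-filter call sites above: src points at the first row below a horizontal edge, pitch is the row stride in bytes, and the eight surrounding rows land in p3…q3, one v16u8 apiece:

    #include <stdint.h>
    #include "vp8/common/mips/msa/vp8_macros_msa.h"

    /* Hypothetical sketch: fetch the 8 rows straddling a horizontal edge. */
    static void load_rows_around_edge(const uint8_t *src, int32_t pitch) {
      v16u8 p3, p2, p1, p0, q0, q1, q2, q3;

      /* Rows src - 4*pitch .. src + 3*pitch, 16 pixels wide each. */
      LD_UB8(src - 4 * pitch, pitch, p3, p2, p1, p0, q0, q1, q2, q3);

      /* A real filter would combine p3..q3 here; silence unused warnings. */
      (void)p3; (void)p2; (void)p1; (void)p0;
      (void)q0; (void)q1; (void)q2; (void)q3;
    }
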
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_avg_msa.c:20  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vp9_avg_8x8_msa()
/external/libvpx/libvpx/vp8/encoder/mips/msa/
denoising_msa.c:321  LD_UB8(sig_start, sig_stride, src0, src1, src2, src3, src4, src5, src6, … in vp8_denoiser_filter_msa()
denoising_msa.c:324  LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13, … in vp8_denoiser_filter_msa()