/external/libvpx/libvpx/vpx_dsp/mips/

fwd_dct32x32_msa.c
     61  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in fdct8x32_1d_column_even_store() local
     71  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7,   in fdct8x32_1d_column_even_store()
     75  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);   in fdct8x32_1d_column_even_store()
     87  SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4);   in fdct8x32_1d_column_even_store()
     89  ADD2(vec4, vec5, vec7, vec6, vec0, vec1);   in fdct8x32_1d_column_even_store()
     95  SUB2(vec4, vec5, vec7, vec6, vec4, vec7);   in fdct8x32_1d_column_even_store()
     96  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);   in fdct8x32_1d_column_even_store()
    105  ADD2(in0, in1, in2, in3, vec0, vec7);   in fdct8x32_1d_column_even_store()
    106  DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0);   in fdct8x32_1d_column_even_store()
    290  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in fdct8x32_1d_row_even_4x() local
    [all …]
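
Note: vec0..vec7 here carry one even-path stage of the 32-point forward DCT. BUTTERFLY_8 forms mirrored sum/difference pairs, ADD2/4 and SUB2/4 combine them, and DOTP_CONST_PAIR applies the rounded cosine rotation. A minimal scalar sketch of those macro semantics follows, assuming the usual definitions in macros_msa.h and fwd_txfm_msa.h; the rotation is shown in one common sign convention, which should be checked against the header.

    #include <stdint.h>

    #define DCT_CONST_BITS 14  /* libvpx fixed-point precision of cospi_*_64 */

    /* Round a 32-bit product back to 16 bits (fdct_round_shift). */
    static int16_t fdct_round_shift(int32_t v) {
      return (int16_t)((v + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    /* BUTTERFLY_8 per macros_msa.h: sums of mirrored inputs in out0..out3,
     * mirrored differences (in3-in4 ... in0-in7) in out4..out7. */
    static void butterfly_8(const int16_t in[8], int16_t out[8]) {
      for (int i = 0; i < 4; ++i) {
        out[i] = in[i] + in[7 - i];
        out[7 - i] = in[i] - in[7 - i];
      }
    }

    /* DOTP_CONST_PAIR(reg0, reg1, c0, c1, out0, out1): the rounded
     * rotation used by the forward DCT; sign pairing assumed. */
    static void dotp_const_pair(int16_t reg0, int16_t reg1, int16_t c0,
                                int16_t c1, int16_t *out0, int16_t *out1) {
      *out0 = fdct_round_shift(reg0 * c0 + reg1 * c1);
      *out1 = fdct_round_shift(reg0 * c1 - reg1 * c0);
    }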

vpx_convolve8_avg_horiz_msa.c
    147  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_8t_and_aver_dst_16w_msa() local
    171  VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,   in common_hz_8t_and_aver_dst_16w_msa()
    177  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,   in common_hz_8t_and_aver_dst_16w_msa()
    201  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_8t_and_aver_dst_32w_msa() local
    227  VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,   in common_hz_8t_and_aver_dst_32w_msa()
    233  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,   in common_hz_8t_and_aver_dst_32w_msa()
    256  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_8t_and_aver_dst_64w_msa() local
    284  VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,   in common_hz_8t_and_aver_dst_64w_msa()
    290  DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0,   in common_hz_8t_and_aver_dst_64w_msa()
    340  v8u16 vec4, vec5, vec6, vec7, filt;   in common_hz_2t_and_aver_dst_4x8_msa() local
    [all …]

vpx_convolve8_avg_vert_msa.c
    358  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   in common_vt_2t_and_aver_dst_8x8mult_msa() local
    377  vec7);   in common_vt_2t_and_aver_dst_8x8mult_msa()
    385  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,   in common_vt_2t_and_aver_dst_8x8mult_msa()
    414  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_vt_2t_and_aver_dst_16w_msa() local
    437  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);   in common_vt_2t_and_aver_dst_16w_msa()
    448  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);   in common_vt_2t_and_aver_dst_16w_msa()
    464  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   in common_vt_2t_and_aver_dst_32w_msa() local
    493  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);   in common_vt_2t_and_aver_dst_32w_msa()
    498  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);   in common_vt_2t_and_aver_dst_32w_msa()
    513  ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);   in common_vt_2t_and_aver_dst_32w_msa()
    [all …]

vpx_convolve8_horiz_msa.c
    344  v8u16 vec4, vec5, vec6, vec7, filt;   in common_hz_2t_4x8_msa() local
    356  vec6, vec7);   in common_hz_2t_4x8_msa()
    357  SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);   in common_hz_2t_4x8_msa()
    358  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,   in common_hz_2t_4x8_msa()
    476  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_2t_16w_msa() local
    494  VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);   in common_hz_2t_16w_msa()
    497  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,   in common_hz_2t_16w_msa()
    518  VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);   in common_hz_2t_16w_msa()
    521  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,   in common_hz_2t_16w_msa()
    541  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_2t_32w_msa() local
    [all …]
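
Note: the _2t_ kernels in this file, in the avg variants above, and in VP8's bilinear_filter_msa.c below all share one pipeline: VSHF_B pairs each byte with its right neighbor, DOTP_UB*_UH applies the two bilinear taps, SRARI_H*_UH rounds by FILTER_BITS, and PCKEV packs back to bytes. A minimal scalar sketch of that arithmetic, assuming libvpx's FILTER_BITS of 7 (VP8_FILTER_SHIFT is likewise 7) and taps that sum to 128; hz_2t_row is an illustrative name, not one from the file.

    #include <stdint.h>

    #define FILTER_BITS 7  /* libvpx; VP8_FILTER_SHIFT has the same value */

    /* Scalar model of common_hz_2t_*: a horizontal two-tap (bilinear)
     * filter with rounding, the arithmetic the MSA sequence vectorizes. */
    static void hz_2t_row(const uint8_t *src, uint8_t *dst, int width,
                          const uint8_t filt[2]) { /* filt[0]+filt[1] == 128 */
      for (int x = 0; x < width; ++x) {
        uint32_t sum =
            (uint32_t)src[x] * filt[0] + (uint32_t)src[x + 1] * filt[1];
        /* SRARI: shift right arithmetic with round-to-nearest */
        dst[x] = (uint8_t)((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
      }
    }

The vertical _vt_2t_ kernels are the same math with src[x] and src[x + 1] replaced by the same column in two adjacent rows (ILVR/ILVL interleave the rows so one DOTP evaluates both taps), and the _and_aver_dst_ variants additionally average the filtered result with the bytes already in dst.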

vpx_convolve8_vert_msa.c
    394  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   in common_vt_2t_8x8mult_msa() local
    413  vec7);   in common_vt_2t_8x8mult_msa()
    421  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,   in common_vt_2t_8x8mult_msa()
    447  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   in common_vt_2t_16w_msa() local
    470  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);   in common_vt_2t_16w_msa()
    481  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);   in common_vt_2t_16w_msa()
    495  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   in common_vt_2t_32w_msa() local
    523  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);   in common_vt_2t_32w_msa()
    528  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);   in common_vt_2t_32w_msa()
    543  ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);   in common_vt_2t_32w_msa()
    [all …]

loopfilter_8_msa.c
    242  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in vpx_lpf_vertical_8_dual_msa() local
    316  ILVRL_H2_SH(vec1, vec0, vec6, vec7);   in vpx_lpf_vertical_8_dual_msa()
    329  ST4x4_UB(vec7, vec7, 0, 1, 2, 3, src, pitch);   in vpx_lpf_vertical_8_dual_msa()

vpx_convolve8_msa.c
    272  v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt;   in common_hv_2ht_2vt_4x8_msa() local
    299  vec5, vec6, vec7);   in common_hv_2ht_2vt_4x8_msa()
    300  SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);   in common_hv_2ht_2vt_4x8_msa()
    301  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,   in common_hv_2ht_2vt_4x8_msa()

sub_pixel_variance_msa.c
    471  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in sub_pixel_sse_diff_16width_h_msa() local
    489  VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);   in sub_pixel_sse_diff_16width_h_msa()
    492  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,   in sub_pixel_sse_diff_16width_h_msa()
    641  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in sub_pixel_sse_diff_16width_v_msa() local
    666  ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);   in sub_pixel_sse_diff_16width_v_msa()
    674  DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);   in sub_pixel_sse_diff_16width_v_msa()
   1053  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in subpel_avg_ssediff_16w_h_msa() local
   1073  VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);   in subpel_avg_ssediff_16w_h_msa()
   1076  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,   in subpel_avg_ssediff_16w_h_msa()
   1252  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in subpel_avg_ssediff_16w_v_msa() local
    [all …]
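
Note: these helpers run the same two-tap sub-pixel filter and then accumulate difference statistics against a reference block. A minimal sketch of the downstream math, assuming the standard libvpx variance formulation variance = sse - sum^2/N; the function name and packed-16-wide layout are illustrative, not taken from the file.

    #include <stdint.h>

    /* Accumulate sum and sse of (filtered - ref) over a 16-wide block,
     * then derive the variance. The MSA code keeps sum/sse in vector
     * accumulators; this is the same arithmetic one lane at a time. */
    static uint32_t subpel_variance_16w(const uint8_t *filtered, /* packed 16 wide */
                                        const uint8_t *ref, int ref_stride,
                                        int height, uint32_t *sse_out) {
      int64_t sum = 0;
      uint32_t sse = 0;
      for (int y = 0; y < height; ++y) {
        for (int x = 0; x < 16; ++x) {
          const int diff = filtered[x] - ref[x];
          sum += diff;
          sse += (uint32_t)(diff * diff);
        }
        filtered += 16;
        ref += ref_stride;
      }
      *sse_out = sse;
      return sse - (uint32_t)(((int64_t)sum * sum) / (16 * height));
    }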

loopfilter_16_msa.c
   1164  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in vpx_vt_lpf_t16_16w() local
   1172  ILVRL_H2_SH(vec1, vec0, vec6, vec7);   in vpx_vt_lpf_t16_16w()
   1185  ST4x4_UB(vec7, vec7, 0, 1, 2, 3, src_org, pitch);   in vpx_vt_lpf_t16_16w()

/external/libvpx/libvpx/vp8/common/mips/msa/

bilinear_filter_msa.c
     56  v8u16 vec4, vec5, vec6, vec7, filt;   in common_hz_2t_4x8_msa() local
     67  vec6, vec7);   in common_hz_2t_4x8_msa()
     68  SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT);   in common_hz_2t_4x8_msa()
     69  PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,   in common_hz_2t_4x8_msa()
    185  v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_2t_16w_msa() local
    202  VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);   in common_hz_2t_16w_msa()
    205  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,   in common_hz_2t_16w_msa()
    226  VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);   in common_hz_2t_16w_msa()
    229  DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,   in common_hz_2t_16w_msa()
    337  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;   in common_vt_2t_8x8mult_msa() local
    [all …]

/external/libyuv/files/source/

scale_msa.cc
    388  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in ScaleRowDown38_2_Box_MSA() local
    415  vec7 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec3);   in ScaleRowDown38_2_Box_MSA()
    422  tmp3 = __msa_hadd_u_w(vec7, vec7);   in ScaleRowDown38_2_Box_MSA()
    458  v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in ScaleRowDown38_3_Box_MSA() local
    483  vec7 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src5);   in ScaleRowDown38_3_Box_MSA()
    491  vec3 += __msa_hadd_u_h((v16u8)vec7, (v16u8)vec7);   in ScaleRowDown38_3_Box_MSA()
    495  vec7 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec3);   in ScaleRowDown38_3_Box_MSA()
    502  tmp3 = __msa_hadd_u_w(vec7, vec7);   in ScaleRowDown38_3_Box_MSA()
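
Note: ScaleRowDown38_*_Box shrinks 8 source columns to 3 output pixels while box-filtering across two or three rows; vec7 holds zero-extended partial sums that __msa_hadd_u_w folds horizontally. A minimal scalar sketch of the two-row case, modeled on libyuv's C fallback ScaleRowDown38_2_Box_C; treat the fixed-point rounding as an assumption to be checked against scale_common.cc.

    #include <stddef.h>
    #include <stdint.h>

    /* Every 8 input columns become 3 output pixels: the first two average
     * 3x2 boxes, the third a 2x2 box. Division is a fixed-point multiply. */
    static void ScaleRowDown38_2_Box_Sketch(const uint8_t* src, ptrdiff_t stride,
                                            uint8_t* dst, int dst_width) {
      const uint8_t* s = src;
      const uint8_t* t = src + stride; /* second row of the box */
      for (int i = 0; i < dst_width; i += 3) {
        dst[0] = (uint8_t)((s[0] + s[1] + s[2] + t[0] + t[1] + t[2]) *
                           (65536 / 6) >> 16);
        dst[1] = (uint8_t)((s[3] + s[4] + s[5] + t[3] + t[4] + t[5]) *
                           (65536 / 6) >> 16);
        dst[2] = (uint8_t)((s[6] + s[7] + t[6] + t[7]) * (65536 / 4) >> 16);
        dst += 3;
        s += 8;
        t += 8;
      }
    }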

row_msa.cc
    825  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;   in ARGBToUVRow_MSA() local
    851  vec7 = (v16u8)__msa_pckod_b((v16i8)src7, (v16i8)src6);   in ARGBToUVRow_MSA()
    855  vec5 = (v16u8)__msa_pckev_b((v16i8)vec7, (v16i8)vec6);   in ARGBToUVRow_MSA()
    879  vec7 = (v16u8)__msa_pckod_b((v16i8)src7, (v16i8)src6);   in ARGBToUVRow_MSA()
    883  vec5 = (v16u8)__msa_pckev_b((v16i8)vec7, (v16i8)vec6);   in ARGBToUVRow_MSA()
    986  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in ARGBToRGB565Row_MSA() local
   1003  vec7 = (v16u8)__msa_sldi_b(zero, (v16i8)src1, 2);   in ARGBToRGB565Row_MSA()
   1007  vec5 = __msa_binsli_b(vec6, vec7, 4);   in ARGBToRGB565Row_MSA()
   1020  v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;   in ARGBToARGB1555Row_MSA() local
   1034  vec7 = (v16u8)__msa_srai_b((v16i8)vec5, 3);   in ARGBToARGB1555Row_MSA()
    [all …]
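
Note: in ARGBToRGB565Row_MSA, vec6/vec7 hold byte-shifted copies of the pixels that __msa_binsli_b merges into 5:6:5 fields. A minimal scalar sketch of the packing itself, assuming libyuv's little-endian ARGB byte order of B, G, R, A in memory; the function name is illustrative.

    #include <stdint.h>

    /* Keep the top 5/6/5 bits of B, G, R and pack them into a 16-bit
     * RGB565 pixel, stored little-endian. */
    static void argb_to_rgb565_row(const uint8_t* argb, uint8_t* dst565,
                                   int width) {
      for (int x = 0; x < width; ++x) {
        const uint8_t b = argb[0], g = argb[1], r = argb[2];
        const uint16_t pix =
            (uint16_t)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
        dst565[0] = (uint8_t)(pix & 0xff);
        dst565[1] = (uint8_t)(pix >> 8);
        argb += 4;
        dst565 += 2;
      }
    }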

/external/llvm/test/CodeGen/X86/

avx512bwvl-intrinsics.ll
     50  %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
     51  ret <8 x i32> %vec7
     99  %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
    100  ret <8 x i32> %vec7
    149  %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
    150  ret <8 x i32> %vec7
    198  %vec7 = insertelement <8 x i32> %vec6, i32 %res7, i32 7
    199  ret <8 x i32> %vec7
    247  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
    248  ret <8 x i16> %vec7
    [all …]

avx512vl-intrinsics.ll
     49  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
     50  ret <8 x i8> %vec7
     97  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
     98  ret <8 x i8> %vec7
    146  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
    147  ret <8 x i8> %vec7
    194  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
    195  ret <8 x i8> %vec7
    243  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
    244  ret <8 x i8> %vec7
    [all …]

avx512-intrinsics.ll
    939  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
    940  ret <8 x i16> %vec7
    987  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
    988  ret <8 x i16> %vec7
   1036  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
   1037  ret <8 x i16> %vec7
   1084  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
   1085  ret <8 x i16> %vec7
   1133  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
   1134  ret <8 x i8> %vec7
    [all …]