/external/libvpx/libvpx/vpx_dsp/mips/
sum_squares_msa.c
   35  v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_sum_squares_2d_i16_msa() local
   37  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   40  DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
   47  v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_sum_squares_2d_i16_msa() local
   49  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   52  DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
   54  LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   58  DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
   60  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   63  DPADD_SH2_SW(src4, src5, src4, src5, mul0, mul1);  in vpx_sum_squares_2d_i16_msa()
   [all …]

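The matches above are the MSA vectorization of a plain sum of squares over a square block of
int16 residuals: LD_SH8 loads eight rows at a time and DPADD accumulates the dot product of
each vector with itself. For orientation, a scalar equivalent would look roughly like the
sketch below; the (src, stride, size) interface mirrors the usual C fallback but should be
treated as an assumption, not the exact libvpx signature.

    #include <stdint.h>

    /* Scalar sketch: sum of squared int16 samples over a size x size block.
     * The MSA code above does the same accumulation eight lanes at a time. */
    static uint64_t sum_squares_2d_i16(const int16_t *src, int stride, int size) {
      uint64_t ss = 0;
      for (int r = 0; r < size; ++r)
        for (int c = 0; c < size; ++c) {
          const int v = src[r * stride + c];
          ss += (uint64_t)(v * v);
        }
      return ss;
    }
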
vpx_convolve_copy_msa.c
   18  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_width8_msa() local
   22  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width8_msa()
   29  out4 = __msa_copy_u_d((v2i64)src4, 0);  in copy_width8_msa()
   51  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width8_msa()
   58  out4 = __msa_copy_u_d((v2i64)src4, 0);  in copy_width8_msa()
  101  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_16multx8mult_msa() local
  108  LD_UB8(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6,  in copy_16multx8mult_msa()
  112  ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst_tmp,  in copy_16multx8mult_msa()
  125  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_width16_msa() local
  129  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width16_msa()
   [all …]

vpx_convolve8_vert_msa.c
   19  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_4w_msa() local
   31  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_4w_msa()
   34  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_4w_msa()
   36  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_4w_msa()
   70  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_8w_msa() local
   81  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_8w_msa()
   82  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_8w_msa()
   84  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_8w_msa()
   86  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_8w_msa()
  124  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_16w_msa() local
   [all …]

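Every convolve8 file in this directory implements the same 8-tap sub-pixel interpolation
filter; the src0..src10 vectors above are simply the rows kept live for the vertical taps.
A scalar sketch of one output pixel of the vertical pass, assuming the usual libvpx
convention of taps that sum to 128 (so the result is rounded and shifted right by 7):

    #include <stdint.h>

    static uint8_t clip_u8(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

    /* One pixel of an 8-tap vertical convolution: the taps cover eight
     * consecutive rows of the same column, with src pointing at the first. */
    static uint8_t convolve8_vert_pixel(const uint8_t *src, int stride,
                                        const int16_t filter[8]) {
      int sum = 0;
      for (int k = 0; k < 8; ++k)
        sum += filter[k] * src[k * stride];
      return clip_u8((sum + 64) >> 7);
    }
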
avg_msa.c
   17  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_avg_8x8_msa() local
   21  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_avg_8x8_msa()
   23  HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);  in vpx_avg_8x8_msa()
   60  v8i16 src0, src1, src2, src3, src4, src5, src6, src7;  in vpx_hadamard_8x8_msa() local
   63  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_hadamard_8x8_msa()
   64  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,  in vpx_hadamard_8x8_msa()
   66  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,  in vpx_hadamard_8x8_msa()
   68  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,  in vpx_hadamard_8x8_msa()
   71  src2, src3, src4, src5, src6, src7);  in vpx_hadamard_8x8_msa()
   72  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,  in vpx_hadamard_8x8_msa()
   [all …]

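vpx_avg_8x8 is just the rounded mean of an 8x8 block (the HADD_UB4_UH calls above sum the
byte lanes), while the Hadamard part chains rounds of BUTTERFLY_8. A scalar sketch of the
average, assuming the customary (sum + 32) >> 6 rounding:

    #include <stdint.h>

    /* Rounded mean of an 8x8 block of bytes: 64 samples, so the divide with
     * rounding is (sum + 32) >> 6. */
    static unsigned avg_8x8(const uint8_t *src, int stride) {
      unsigned sum = 0;
      for (int r = 0; r < 8; ++r)
        for (int c = 0; c < 8; ++c)
          sum += src[r * stride + c];
      return (sum + 32) >> 6;
    }
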
vpx_convolve8_avg_vert_msa.c
   20  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_4w_msa() local
   32  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_and_aver_dst_4w_msa()
   35  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_and_aver_dst_4w_msa()
   37  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_and_aver_dst_4w_msa()
   78  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_8w_msa() local
   89  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_and_aver_dst_8w_msa()
   92  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_and_aver_dst_8w_msa()
   93  ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_and_aver_dst_8w_msa()
   95  ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_and_aver_dst_8w_msa()
  135  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_16w_mult_msa() local
   [all …]

vpx_convolve8_msa.c
   29  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_4w_msa() local
   47  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_4w_msa()
   48  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_4w_msa()
   55  hz_out4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_4w_msa()
  103  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_8w_msa() local
  122  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_8w_msa()
  125  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_8w_msa()
  134  hz_out4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_8w_msa()
  237  v16i8 src0, src1, src2, src3, src4, mask;  in common_hv_2ht_2vt_4x4_msa() local
  250  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);  in common_hv_2ht_2vt_4x4_msa()
   [all …]

vpx_convolve8_avg_msa.c
   19  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_and_aver_dst_4w_msa() local
   37  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
   38  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
   45  hz_out4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
   97  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_and_aver_dst_8w_msa() local
  116  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
  119  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
  128  hz_out4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
  228  v16i8 src0, src1, src2, src3, src4, mask;  in common_hv_2ht_2vt_and_aver_dst_4x4_msa() local
  242  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);  in common_hv_2ht_2vt_and_aver_dst_4x4_msa()
   [all …]

sub_pixel_variance_msa.c
  468  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_pixel_sse_diff_16width_h_msa() local
  480  LD_SB4(src, src_stride, src0, src2, src4, src6);  in sub_pixel_sse_diff_16width_h_msa()
  488  VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);  in sub_pixel_sse_diff_16width_h_msa()
  552  v16u8 src0, src1, src2, src3, src4, out;  in sub_pixel_sse_diff_4width_v_msa() local
  568  LD_UB4(src, src_stride, src1, src2, src3, src4);  in sub_pixel_sse_diff_4width_v_msa()
  574  ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,  in sub_pixel_sse_diff_4width_v_msa()
  581  src0 = src4;  in sub_pixel_sse_diff_4width_v_msa()
  595  v16u8 src0, src1, src2, src3, src4;  in sub_pixel_sse_diff_8width_v_msa() local
  610  LD_UB4(src, src_stride, src1, src2, src3, src4);  in sub_pixel_sse_diff_8width_v_msa()
  616  ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,  in sub_pixel_sse_diff_8width_v_msa()
   [all …]

vpx_convolve_avg_msa.c
   83  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in avg_width16_msa() local
   87  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in avg_width16_msa()
   93  AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,  in avg_width16_msa()
  104  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in avg_width32_msa() local
  110  LD_UB4(src, src_stride, src0, src2, src4, src6);  in avg_width32_msa()
  125  AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,  in avg_width32_msa()
  145  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in avg_width64_msa() local
  153  LD_UB4(src, 16, src4, src5, src6, src7);  in avg_width64_msa()
  171  AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5,  in avg_width64_msa()

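The AVER_UB4_UB lines are the whole story here: vpx_convolve_avg replaces each destination
byte with the rounding average of the source byte and what is already in dst, sixteen bytes
per vector operation. Per pixel that is simply:

    #include <stdint.h>

    /* dst[x] = rounding average of src[x] and dst[x]. */
    static void avg_row(const uint8_t *src, uint8_t *dst, int width) {
      for (int x = 0; x < width; ++x)
        dst[x] = (uint8_t)((src[x] + dst[x] + 1) >> 1);
    }
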
subtract_msa.c
   62  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_blk_16x16_msa() local
   68  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in sub_blk_16x16_msa()
   95  ILVRL_B2_UB(src4, pred4, src_l0, src_l1);  in sub_blk_16x16_msa()
  121  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_blk_32x32_msa() local
  131  LD_SB2(src, 16, src4, src5);  in sub_blk_32x32_msa()
  161  ILVRL_B2_UB(src4, pred4, src_l0, src_l1);  in sub_blk_32x32_msa()
  183  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_blk_64x64_msa() local
  191  LD_SB4(src, 16, src4, src5, src6, src7);  in sub_blk_64x64_msa()
  213  ILVRL_B2_UB(src4, pred4, src_l0, src_l1);  in sub_blk_64x64_msa()

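The sub_blk_* routines build the residual block src - pred as int16; ILVRL_B2_UB interleaves
source and prediction bytes so a widening subtract can run on whole vectors. A scalar sketch
(parameter names are illustrative, not the exact libvpx prototype):

    #include <stdint.h>

    /* n x n residual: per-pixel difference of source and prediction,
     * widened to 16 bits so negative values survive. */
    static void subtract_block(int n, int16_t *diff, int diff_stride,
                               const uint8_t *src, int src_stride,
                               const uint8_t *pred, int pred_stride) {
      for (int r = 0; r < n; ++r)
        for (int c = 0; c < n; ++c)
          diff[r * diff_stride + c] =
              (int16_t)src[r * src_stride + c] - (int16_t)pred[r * pred_stride + c];
    }
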
vpx_convolve8_horiz_msa.c
  342  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_4x8_msa() local
  352  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in common_hz_2t_4x8_msa()
  354  VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);  in common_hz_2t_4x8_msa()
  475  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_16w_msa() local
  487  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_2t_16w_msa()
  493  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  511  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_2t_16w_msa()
  517  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  540  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_32w_msa() local
  556  src4 = LD_SB(src);  in common_hz_2t_32w_msa()
   [all …]

vpx_convolve8_avg_horiz_msa.c
  337  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_and_aver_dst_4x8_msa() local
  348  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in common_hz_2t_and_aver_dst_4x8_msa()
  351  VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);  in common_hz_2t_and_aver_dst_4x8_msa()
  482  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_and_aver_dst_16w_msa() local
  493  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_2t_and_aver_dst_16w_msa()
  499  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_and_aver_dst_16w_msa()
  518  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_2t_and_aver_dst_16w_msa()
  524  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_and_aver_dst_16w_msa()
  549  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_and_aver_dst_32w_msa() local
  566  src4 = LD_SB(src);  in common_hz_2t_and_aver_dst_32w_msa()
   [all …]

/external/libpng/mips/
filter_msa_intrinsics.c
  373  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in png_read_filter_row_up_msa() local
  378  LD_UB4(pp, 16, src4, src5, src6, src7);  in png_read_filter_row_up_msa()
  381  ADD4(src0, src4, src1, src5, src2, src6, src3, src7,  in png_read_filter_row_up_msa()
  399  LD_UB4(pp, 16, src4, src5, src6, src7);  in png_read_filter_row_up_msa()
  401  ADD4(src0, src4, src1, src5, src2, src6, src3, src7,  in png_read_filter_row_up_msa()
  410  LD_UB2(pp, 16, src4, src5);  in png_read_filter_row_up_msa()
  415  ADD3(src0, src4, src1, src5, src2, src6, src0, src1, src2);  in png_read_filter_row_up_msa()
  425  LD_UB2(pp, 16, src4, src5);  in png_read_filter_row_up_msa()
  427  ADD2(src0, src4, src1, src5, src0, src1);  in png_read_filter_row_up_msa()
  436  LD_UB2(pp, 16, src4, src5);  in png_read_filter_row_up_msa()
   [all …]

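png_read_filter_row_up undoes the PNG "Up" filter: every byte of the current row gets the
byte directly above it added back, modulo 256, which is all the ADD4/ADD3/ADD2 calls above
do, sixteen bytes per vector. Byte for byte:

    #include <stddef.h>
    #include <stdint.h>

    /* PNG "Up" defilter: row[i] += prev_row[i] (mod 256). */
    static void defilter_up(uint8_t *row, const uint8_t *prev_row, size_t rowbytes) {
      for (size_t i = 0; i < rowbytes; ++i)
        row[i] = (uint8_t)(row[i] + prev_row[i]);
    }
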
/external/libvpx/libvpx/vp8/common/mips/msa/
bilinear_filter_msa.c
   54  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_4x8_msa() local
   63  LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in common_hz_2t_4x8_msa()
   65  VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);  in common_hz_2t_4x8_msa()
  184  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_16w_msa() local
  195  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_2t_16w_msa()
  201  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  219  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_2t_16w_msa()
  225  VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);  in common_hz_2t_16w_msa()
  247  v16i8 src0, src1, src2, src3, src4;  in common_vt_2t_4x4_msa() local
  256  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);  in common_vt_2t_4x4_msa()
   [all …]

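The VP8 bilinear path is a two-tap filter: each output pixel blends two neighbouring pixels
with weights that sum to 128 (VSHF_B2_UB gathers the pixel pairs, the dot-product step
applies the weights). A scalar sketch of one tap pair, assuming that 7-bit weight convention:

    #include <stdint.h>

    /* Two-tap bilinear interpolation; w0 + w1 is assumed to be 128, so the
     * result is rounded and shifted right by 7. */
    static uint8_t bilinear_2t(uint8_t a, uint8_t b, int w0, int w1) {
      return (uint8_t)((a * w0 + b * w1 + 64) >> 7);
    }
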
sixtap_filter_msa.c
  253  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2;  in common_hz_6t_16w_msa() local
  267  LD_SB4(src, src_stride, src0, src2, src4, src6);  in common_hz_6t_16w_msa()
  269  XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);  in common_hz_6t_16w_msa()
  274  HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2,  in common_hz_6t_16w_msa()
  299  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;  in common_vt_6t_4w_msa() local
  310  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);  in common_vt_6t_4w_msa()
  313  ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,  in common_vt_6t_4w_msa()
  322  ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,  in common_vt_6t_4w_msa()
  336  src4 = src8;  in common_vt_6t_4w_msa()
  344  v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10;  in common_vt_6t_8w_msa() local
   [all …]

copymem_msa.c
   37  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_16x16_msa() local
   40  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_16x16_msa()
   44  ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride);  in copy_16x16_msa()

/external/llvm/lib/Target/X86/
X86InstrXOP.td
  350  (ins VR128:$src1, VR128:$src2, VR128:$src3, u8imm:$src4),
  352  "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
  355  (id128 VR128:$src3), (i8 imm:$src4))))]>;
  357  (ins VR128:$src1, VR128:$src2, i128mem:$src3, u8imm:$src4),
  359  "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
  363  (i8 imm:$src4))))]>,
  366  (ins VR128:$src1, f128mem:$src2, VR128:$src3, u8imm:$src4),
  368  "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
  372  (id128 VR128:$src3), (i8 imm:$src4))))]>;
  376  (ins VR128:$src1, VR128:$src2, VR128:$src3, u8imm:$src4),
   [all …]

/external/libyuv/files/source/
scale_msa.cc
  230  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1;  in ScaleRowDown2Box_MSA() local
  238  src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);  in ScaleRowDown2Box_MSA()
  246  vec0 += __msa_hadd_u_h(src4, src4);  in ScaleRowDown2Box_MSA()
  294  v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0;  in ScaleRowDown4Box_MSA() local
  303  src4 = (v16u8)__msa_ld_b((v16i8*)t0, 0);  in ScaleRowDown4Box_MSA()
  311  vec0 += __msa_hadd_u_h(src4, src4);  in ScaleRowDown4Box_MSA()
  319  src4 = (v16u8)__msa_ld_b((v16i8*)t2, 0);  in ScaleRowDown4Box_MSA()
  327  vec0 += __msa_hadd_u_h(src4, src4);  in ScaleRowDown4Box_MSA()
  457  v16u8 src0, src1, src2, src3, src4, src5, out;  in ScaleRowDown38_3_Box_MSA() local
  474  src4 = (v16u8)__msa_ld_b((v16i8*)t1, 0);  in ScaleRowDown38_3_Box_MSA()
   [all …]

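ScaleRowDown2Box halves an image by averaging every 2x2 block into one output pixel; the
__msa_hadd_u_h calls above sum horizontal pairs of the two source rows before the rounded
divide by four. The scalar form is:

    #include <stdint.h>

    /* Half-size box filter: each output pixel is the rounded mean of a 2x2
     * block; s is the upper source row, t the row one stride below it. */
    static void scale_row_down2_box(const uint8_t *s, const uint8_t *t,
                                    uint8_t *dst, int dst_width) {
      for (int x = 0; x < dst_width; ++x, s += 2, t += 2)
        dst[x] = (uint8_t)((s[0] + s[1] + t[0] + t[1] + 2) >> 2);
    }
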
/external/v8/src/arm/
macro-assembler-arm.h
  361  Register src4,
  365  if (src3.code() > src4.code()) {
  368  src1.bit() | src2.bit() | src3.bit() | src4.bit(),
  372  str(src4, MemOperand(sp, 4, NegPreIndex), cond);
  376  Push(src3, src4, cond);
  380  Push(src2, src3, src4, cond);
  385  void Push(Register src1, Register src2, Register src3, Register src4,
  389  if (src3.code() > src4.code()) {
  390  if (src4.code() > src5.code()) {
  392  src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
   [all …]

/external/llvm/lib/Target/Hexagon/
HexagonInstrEnc.td
  401  bits<5> src4;
  405  let Inst{13-0} = { src3_vector{3}, src1{1-0}, src3_vector{2-0}, opc{2-0}, src4{4-0} };
  413  bits<5> src4;
  417  let Inst{13-0} = { src3_vector{3}, src1{1-0}, src3_vector{2-0}, opc{2-0}, src4{4-0} };
  447  bits<3> src4;
  451  let Inst{13-0} = { src3_vector{3}, src1{1-0}, src3_vector{2-0}, 0b01, opc{2-0}, src4{2-0} };
  464  bits<3> src4;
  468  let Inst{13-0} = { src3_vector{3}, src1{1-0}, src3_vector{2-0}, 0b01, opc{2-0}, src4{2-0} };
  579  // TODO: Change script to generate src1, src2,src3 and src4 instead of
  586  bits<5> src4;
   [all …]

HexagonIntrinsicsV60.td
  449  def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3, imm:$src4),
  450  (MI VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3, imm:$src4)>,
  455  IntRegs:$src3, imm:$src4),
  458  IntRegs:$src3, imm:$src4)>,
  464  IntRegs:$src4),
  466  IntRegs:$src4)>,
  472  IntRegs:$src4),
  476  IntRegs:$src4)>,
  482  IntRegs:$src4),
  484  IntRegs:$src4)>,
   [all …]

/external/opencv/cv/src/
cvderiv.cpp
  609  … const int *src0 = src[-2], *src1 = src[-1], *src2 = src[0], *src3 = src[1], *src4 = src[2];  in icvLaplaceCol_32s16s() local
  614  int s0 = src0[i] - src2[i]*2 + src4[i] + src0[i+width] + src4[i+width] +  in icvLaplaceCol_32s16s()
  616  int s1 = src0[i+1] - src2[i+1]*2 + src4[i+1] + src0[i+width+1] +  in icvLaplaceCol_32s16s()
  617  src4[i+width+1] + (src1[i+width+1] + src3[i+width+1])*4 +  in icvLaplaceCol_32s16s()
  624  int s0 = CV_DESCALE(src0[i] - src2[i]*2 + src4[i] +  in icvLaplaceCol_32s16s()
  625  src0[i+width] + src4[i+width] +  in icvLaplaceCol_32s16s()
  627  int s1 = CV_DESCALE(src0[i+1] - src2[i+1]*2 + src4[i+1] +  in icvLaplaceCol_32s16s()
  628  src0[i+width+1] + src4[i+width+1] +  in icvLaplaceCol_32s16s()
  751  … const float *src0 = src[-2], *src1 = src[-1], *src2 = src[0], *src3 = src[1], *src4 = src[2];  in icvLaplaceCol_32f() local
  754  float s0 = (src0[i] - src2[i]*2 + src4[i] +  in icvLaplaceCol_32f()
   [all …]

/external/mesa3d/src/gallium/drivers/softpipe/
sp_quad_blend.c
  156  uint *src4 = (uint *) src;  in logicop_quad() local
  182  res4[j] = ~(src4[j] | dst4[j]);  in logicop_quad()
  186  res4[j] = ~src4[j] & dst4[j];  in logicop_quad()
  190  res4[j] = ~src4[j];  in logicop_quad()
  194  res4[j] = src4[j] & ~dst4[j];  in logicop_quad()
  202  res4[j] = dst4[j] ^ src4[j];  in logicop_quad()
  206  res4[j] = ~(src4[j] & dst4[j]);  in logicop_quad()
  210  res4[j] = src4[j] & dst4[j];  in logicop_quad()
  214  res4[j] = ~(src4[j] ^ dst4[j]);  in logicop_quad()
  222  res4[j] = ~src4[j] | dst4[j];  in logicop_quad()
   [all …]

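logicop_quad treats the quad's four RGBA8 pixels as packed 32-bit words (src4/dst4/res4), so
every framebuffer logic op collapses to one bitwise expression per word, as the case bodies
above show. A scalar sketch over a single word; the enum here is illustrative rather than
the gallium PIPE_LOGICOP_* names:

    #include <stdint.h>

    enum logicop { LOP_AND, LOP_OR, LOP_XOR, LOP_NOR, LOP_NAND, LOP_INVERT, LOP_COPY };

    /* Apply a logic op to one packed pixel word against the destination. */
    static uint32_t apply_logicop(enum logicop op, uint32_t s, uint32_t d) {
      switch (op) {
        case LOP_AND:    return s & d;
        case LOP_OR:     return s | d;
        case LOP_XOR:    return s ^ d;
        case LOP_NOR:    return ~(s | d);
        case LOP_NAND:   return ~(s & d);
        case LOP_INVERT: return ~d;
        default:         return s;   /* plain copy */
      }
    }
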
/external/mesa3d/src/mesa/main/
image.c
  542  const GLfloat (*src4)[4] = (const GLfloat (*)[4]) src;  in _mesa_convert_colors() local
  547  _mesa_unclamped_float_rgba_to_ubyte(dst1[i], src4[i]);  in _mesa_convert_colors()
  553  const GLfloat (*src4)[4] = (const GLfloat (*)[4]) src;  in _mesa_convert_colors() local
  559  UNCLAMPED_FLOAT_TO_USHORT(dst2[i][RCOMP], src4[i][RCOMP]);  in _mesa_convert_colors()
  560  UNCLAMPED_FLOAT_TO_USHORT(dst2[i][GCOMP], src4[i][GCOMP]);  in _mesa_convert_colors()
  561  UNCLAMPED_FLOAT_TO_USHORT(dst2[i][BCOMP], src4[i][BCOMP]);  in _mesa_convert_colors()
  562  UNCLAMPED_FLOAT_TO_USHORT(dst2[i][ACOMP], src4[i][ACOMP]);  in _mesa_convert_colors()

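_mesa_convert_colors views the float RGBA spans as GLfloat[4] rows (that is the src4 cast
above) and clamps/rescales each channel into the integer span format. Ignoring the macro's
bit-level fast paths, the ubyte conversion is semantically:

    #include <stdint.h>

    /* Clamp a float channel to [0, 1] and rescale to 0..255 with rounding;
     * this is what an UNCLAMPED_FLOAT_TO_UBYTE-style macro boils down to. */
    static uint8_t float_channel_to_ubyte(float f) {
      if (f <= 0.0f) return 0;
      if (f >= 1.0f) return 255;
      return (uint8_t)(f * 255.0f + 0.5f);
    }
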
/external/skia/samplecode/
SamplePolyToPoly.cpp
  154  const int src4[] = { 0, 0, 64, 0, 64, 64, 0, 64 };  in onDrawContent() local
  156  doDraw(canvas, &paint, src4, dst4, 4);  in onDrawContent()