Searched refs:src3 (Results 1 – 25 of 189) sorted by relevance

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/
HexagonMapAsm2IntrinV65.gen.td
11 …sat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntReg…
12 …28B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruwuhsat HvxVR:$src1, HvxVR:$src2, IntReg…
13 …sat HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntReg…
14 …28B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubsat HvxVR:$src1, HvxVR:$src2, IntReg…
15 …at HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntR…
16 …8B HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3), (V6_vasruhubrndsat HvxVR:$src1, HvxVR:$src2, IntR…
21 …_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
22 …h_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vaslh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
23 …_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
24 …h_acc_128B HvxVR:$src1, HvxVR:$src2, IntRegs:$src3), (V6_vasrh_acc HvxVR:$src1, HvxVR:$src2, IntRe…
[all …]
HexagonMapAsm2IntrinV62.gen.td
18 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
19 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
21 IntRegsLow8:$src3),
22 (MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
40 def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3),
41 (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
43 HvxVR:$src3),
44 (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
55 def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
56 (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
[all …]
HexagonIntrinsicsV60.td
172 def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
173 (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
176 IntRegs:$src3),
177 (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
181 def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegs:$src3),
182 (MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
185 IntRegs:$src3),
186 (MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
190 def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, IntRegs:$src3),
191 (MI HvxWR:$src1, HvxVR:$src2, IntRegs:$src3)>;
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
sum_squares_msa.c
22 uint64_t src0, src1, src2, src3; in vpx_sum_squares_2d_i16_msa() local
26 LD4(src, src_stride, src0, src1, src2, src3); in vpx_sum_squares_2d_i16_msa()
28 INSERT_D2_SH(src2, src3, diff1); in vpx_sum_squares_2d_i16_msa()
35 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
37 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
39 DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
47 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_sum_squares_2d_i16_msa() local
49 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
51 DPADD_SH2_SW(src2, src3, src2, src3, mul0, mul1); in vpx_sum_squares_2d_i16_msa()
54 LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_sum_squares_2d_i16_msa()
[all …]
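
For orientation: vpx_sum_squares_2d_i16_msa above vectorizes a sum of squares over a block of int16 samples, with src0–src3 (and beyond) holding consecutive rows. A minimal scalar sketch of that operation follows; the name and signature are assumptions for illustration, not taken from the file.

#include <stdint.h>

/* Hedged sketch: sum of squares over a size x size block of int16
   samples. The MSA version loads rows into src0..src3 and accumulates
   with dot-product vector instructions instead of this scalar loop. */
static uint64_t sum_squares_2d_i16_sketch(const int16_t *src, int stride,
                                          int size) {
  uint64_t ss = 0;
  for (int r = 0; r < size; ++r) {
    for (int c = 0; c < size; ++c) {
      const int32_t v = src[r * stride + c];
      ss += (uint64_t)(v * v);
    }
  }
  return ss;
}
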
vpx_convolve_copy_msa.c
19 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_width8_msa() local
23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
29 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
40 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
46 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
58 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
71 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
76 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
102 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_16multx8mult_msa() local
[all …]
vpx_convolve8_horiz_msa.c
19 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; in common_hz_8t_4x4_msa() local
33 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x4_msa()
34 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x4_msa()
35 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x4_msa()
47 v16i8 src0, src1, src2, src3; in common_hz_8t_4x8_msa() local
62 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
63 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
65 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x8_msa()
67 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
68 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
[all …]
vpx_convolve8_avg_horiz_msa.c
20 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; in common_hz_8t_and_aver_dst_4x4_msa() local
36 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_and_aver_dst_4x4_msa()
37 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_and_aver_dst_4x4_msa()
38 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_and_aver_dst_4x4_msa()
54 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; in common_hz_8t_and_aver_dst_4x8_msa() local
70 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_and_aver_dst_4x8_msa()
71 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_and_aver_dst_4x8_msa()
77 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_and_aver_dst_4x8_msa()
79 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_and_aver_dst_4x8_msa()
80 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_and_aver_dst_4x8_msa()
[all …]
vpx_convolve8_vert_msa.c
19 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_4w_msa() local
31 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_4w_msa()
34 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, in common_vt_8t_4w_msa()
36 ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); in common_vt_8t_4w_msa()
70 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_8w_msa() local
81 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_8w_msa()
82 XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_8w_msa()
84 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, in common_vt_8t_8w_msa()
86 ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); in common_vt_8t_8w_msa()
124 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_16w_msa() local
[all …]
vpx_convolve_msa.h
50 #define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
57 VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
59 VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
61 VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
63 VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \
68 #define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
76 VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
80 VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \
84 VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \
88 VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \
avg_msa.c
17 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_avg_8x8_msa() local
21 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_avg_8x8_msa()
22 HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3); in vpx_avg_8x8_msa()
39 uint32_t src0, src1, src2, src3; in vpx_avg_4x4_msa() local
45 LW4(src, src_stride, src0, src1, src2, src3); in vpx_avg_4x4_msa()
46 INSERT_W4_UB(src0, src1, src2, src3, vec); in vpx_avg_4x4_msa()
61 v8i16 src0, src1, src2, src3, src4, src5, src6, src7; in vpx_hadamard_8x8_msa() local
64 LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_hadamard_8x8_msa()
65 BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4, in vpx_hadamard_8x8_msa()
68 src5, src7, src6, src3, src2); in vpx_hadamard_8x8_msa()
[all …]
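
The vpx_avg_8x8_msa hits above compute the rounded mean of an 8x8 pixel block, with src0–src7 holding its eight rows. A scalar sketch of the same computation; the function name is ours, and the (sum + 32) >> 6 rounding is an assumption based on the usual libvpx reference behavior.

#include <stdint.h>

/* Hedged sketch: sum the 64 pixels and return the rounded average. */
static unsigned avg_8x8_sketch(const uint8_t *src, int stride) {
  unsigned sum = 0;
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c)
      sum += src[r * stride + c];
  return (sum + 32) >> 6; /* round to nearest when dividing by 64 */
}
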
vpx_convolve8_avg_vert_msa.c
21 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_and_aver_dst_4w_msa() local
33 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_and_aver_dst_4w_msa()
36 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, in common_vt_8t_and_aver_dst_4w_msa()
38 ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); in common_vt_8t_and_aver_dst_4w_msa()
78 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_and_aver_dst_8w_msa() local
89 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_and_aver_dst_8w_msa()
92 XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_and_aver_dst_8w_msa()
93 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, in common_vt_8t_and_aver_dst_8w_msa()
95 ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); in common_vt_8t_and_aver_dst_8w_msa()
137 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_and_aver_dst_16w_mult_msa() local
[all …]
/external/llvm/lib/Target/X86/
X86InstrXOP.td
150 (ins VR128:$src1, VR128:$src2, VR128:$src3),
152 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
154 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V, VEX_I8IMM;
156 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
158 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
161 VR128:$src3))]>, XOP_4V, VEX_I8IMM;
201 (ins VR128:$src1, VR128:$src2, u8imm:$src3),
203 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
207 (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
209 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[all …]
X86InstrFMA.td
44 (ins VR128:$src1, VR128:$src2, VR128:$src3),
46 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
48 VR128:$src1, VR128:$src3)))]>;
52 (ins VR128:$src1, VR128:$src2, f128mem:$src3),
54 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
56 (MemFrag128 addr:$src3))))]>;
60 (ins VR256:$src1, VR256:$src2, VR256:$src3),
62 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
64 VR256:$src3)))]>, VEX_L;
68 (ins VR256:$src1, VR256:$src2, f256mem:$src3),
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/
X86InstrFMA.td
41 (ins RC:$src1, RC:$src2, RC:$src3),
43 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
44 [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>,
49 (ins RC:$src1, RC:$src2, x86memop:$src3),
51 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
53 (MemFrag addr:$src3))))]>,
62 (ins RC:$src1, RC:$src2, RC:$src3),
64 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
69 (ins RC:$src1, RC:$src2, x86memop:$src3),
71 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[all …]
X86InstrXOP.td
173 (ins VR128:$src1, VR128:$src2, VR128:$src3),
175 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
177 (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V,
180 (ins VR128:$src1, i128mem:$src2, VR128:$src3),
182 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
185 VR128:$src3))]>, XOP_4V, Sched<[sched.Folded, ReadAfterLd]>;
219 (v8i16 VR128:$src3))),
220 (VPMACSWWrr VR128:$src1, VR128:$src2, VR128:$src3)>;
222 (v4i32 VR128:$src3))),
223 (VPMACSDDrr VR128:$src1, VR128:$src2, VR128:$src3)>;
[all …]
/external/libaom/libaom/aom_dsp/mips/
aom_convolve_copy_msa.c
19 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_width8_msa() local
23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
29 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
40 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
46 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
58 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
71 LD_UB4(src, src_stride, src0, src1, src2, src3); in copy_width8_msa()
76 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
102 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_16multx8mult_msa() local
[all …]
aom_convolve8_horiz_msa.c
22 v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; in common_hz_8t_4x4_msa() local
36 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x4_msa()
37 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x4_msa()
38 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x4_msa()
50 v16i8 src0, src1, src2, src3; in common_hz_8t_4x8_msa() local
65 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
66 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
68 HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3, in common_hz_8t_4x8_msa()
70 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_8t_4x8_msa()
71 XORI_B4_128_SB(src0, src1, src2, src3); in common_hz_8t_4x8_msa()
[all …]
aom_convolve_msa.h
34 #define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
41 VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
43 VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
45 VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
47 VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \
52 #define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
60 VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
64 VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \
68 VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \
72 VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \
aom_convolve8_vert_msa.c
22 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_4w_msa() local
34 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_4w_msa()
37 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, in common_vt_8t_4w_msa()
39 ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); in common_vt_8t_4w_msa()
73 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_8w_msa() local
84 LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_8w_msa()
85 XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); in common_vt_8t_8w_msa()
87 ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, in common_vt_8t_8w_msa()
89 ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); in common_vt_8t_8w_msa()
127 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_16w_msa() local
[all …]
sad_msa.c
29 uint32_t src0, src1, src2, src3, ref0, ref1, ref2, ref3; in sad_4width_msa() local
36 LW4(src_ptr, src_stride, src0, src1, src2, src3); in sad_4width_msa()
41 INSERT_W4_UB(src0, src1, src2, src3, src); in sad_4width_msa()
55 v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3; in sad_8width_msa() local
59 LD_UB4(src, src_stride, src0, src1, src2, src3); in sad_8width_msa()
64 PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, in sad_8width_msa()
137 v16u8 src0, src1, src2, src3; in sad_64width_msa() local
143 LD_UB4(src, 16, src0, src1, src2, src3); in sad_64width_msa()
148 sad1 += SAD_UB2_UH(src2, src3, ref2, ref3); in sad_64width_msa()
150 LD_UB4(src, 16, src0, src1, src2, src3); in sad_64width_msa()
[all …]
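
sad_4width_msa and its siblings accumulate a sum of absolute differences between source and reference blocks; the LW4/LD_UB4 hits load four rows of each into src0–src3 and ref0–ref3. A scalar sketch of the underlying SAD; the name and the width/height parameterization are ours for illustration.

#include <stdint.h>

/* Hedged sketch: sum of absolute differences over a width x height block. */
static uint32_t sad_sketch(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride,
                           int width, int height) {
  uint32_t sad = 0;
  for (int r = 0; r < height; ++r) {
    for (int c = 0; c < width; ++c) {
      const int d = src[r * src_stride + c] - ref[r * ref_stride + c];
      sad += (d < 0) ? -d : d;
    }
  }
  return sad;
}
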
sub_pixel_variance_msa.c
43 uint32_t src0, src1, src2, src3; in avg_sse_diff_4width_msa() local
53 LW4(src_ptr, src_stride, src0, src1, src2, src3); in avg_sse_diff_4width_msa()
58 INSERT_W4_UB(src0, src1, src2, src3, src); in avg_sse_diff_4width_msa()
78 v16u8 src0, src1, src2, src3; in avg_sse_diff_8width_msa() local
87 LD_UB4(src_ptr, src_stride, src0, src1, src2, src3); in avg_sse_diff_8width_msa()
92 PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, in avg_sse_diff_8width_msa()
285 v16u8 src0, src1, src2, src3; in avg_sse_diff_64x32_msa() local
295 LD_UB4(src_ptr, 16, src0, src1, src2, src3); in avg_sse_diff_64x32_msa()
299 AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1, in avg_sse_diff_64x32_msa()
300 src2, src3); in avg_sse_diff_64x32_msa()
[all …]
/external/libvpx/libvpx/vp8/common/mips/msa/
copymem_msa.c
16 uint64_t src0, src1, src2, src3; in copy_8x4_msa() local
18 LD4(src, src_stride, src0, src1, src2, src3); in copy_8x4_msa()
19 SD4(src0, src1, src2, src3, dst, dst_stride); in copy_8x4_msa()
24 uint64_t src0, src1, src2, src3; in copy_8x8_msa() local
26 LD4(src, src_stride, src0, src1, src2, src3); in copy_8x8_msa()
28 SD4(src0, src1, src2, src3, dst, dst_stride); in copy_8x8_msa()
31 LD4(src, src_stride, src0, src1, src2, src3); in copy_8x8_msa()
32 SD4(src0, src1, src2, src3, dst, dst_stride); in copy_8x8_msa()
37 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; in copy_16x16_msa() local
40 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_16x16_msa()
[all …]
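
copy_8x4_msa above moves an 8x4 block as four 64-bit values: LD4 fills src0–src3 from successive rows and SD4 stores them out. A portable sketch of the same copy; copy_8x4_sketch is a hypothetical name, and memcpy stands in for the MSA load/store macros.

#include <stdint.h>
#include <string.h>

/* Hedged sketch: copy four 8-byte rows through uint64_t temporaries,
   mirroring the src0..src3 pattern in the MSA hits. */
static void copy_8x4_sketch(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride) {
  uint64_t src0, src1, src2, src3;
  memcpy(&src0, src + 0 * src_stride, 8);
  memcpy(&src1, src + 1 * src_stride, 8);
  memcpy(&src2, src + 2 * src_stride, 8);
  memcpy(&src3, src + 3 * src_stride, 8);
  memcpy(dst + 0 * dst_stride, &src0, 8);
  memcpy(dst + 1 * dst_stride, &src1, 8);
  memcpy(dst + 2 * dst_stride, &src2, 8);
  memcpy(dst + 3 * dst_stride, &src3, 8);
}
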
bilinear_filter_msa.c
33 v16i8 src0, src1, src2, src3, mask; in common_hz_2t_4x4_msa() local
42 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_2t_4x4_msa()
43 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x4_msa()
54 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; in common_hz_2t_4x8_msa() local
63 LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in common_hz_2t_4x8_msa()
64 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x8_msa()
90 v16i8 src0, src1, src2, src3, mask; in common_hz_2t_8x4_msa() local
98 LD_SB4(src, src_stride, src0, src1, src2, src3); in common_hz_2t_8x4_msa()
100 VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); in common_hz_2t_8x4_msa()
112 v16i8 src0, src1, src2, src3, mask, out0, out1; in common_hz_2t_8x8mult_msa() local
[all …]
sixtap_filter_msa.c
52 #define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
57 VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
59 VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
61 VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
65 #define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
72 VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
76 VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \
78 VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \
109 #define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ argument
114 VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
[all …]
/external/libyuv/files/source/
scale_msa.cc
69 v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0; in ScaleARGBRowDown2Box_MSA() local
77 src3 = (v16u8)__msa_ld_b((v16i8*)t, 16); in ScaleARGBRowDown2Box_MSA()
81 vec3 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src3, (v16i8)src3); in ScaleARGBRowDown2Box_MSA()
131 v16u8 src0 = {0}, src1 = {0}, src2 = {0}, src3 = {0}; in ScaleARGBRowDownEvenBox_MSA() local
151 src3 = (v16u8)__msa_insert_d((v2i64)src3, 0, data2); in ScaleARGBRowDownEvenBox_MSA()
152 src3 = (v16u8)__msa_insert_d((v2i64)src3, 1, data3); in ScaleARGBRowDownEvenBox_MSA()
154 vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1); in ScaleARGBRowDownEvenBox_MSA()
156 vec3 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1); in ScaleARGBRowDownEvenBox_MSA()
182 v16u8 src0, src1, src2, src3, dst0, dst1; in ScaleRowDown2_MSA() local
189 src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48); in ScaleRowDown2_MSA()
[all …]
