/external/libvpx/libvpx/vpx_dsp/ppc/ |
D | fdct32x32_vsx.c |
    29   const int32x4_t sum_e = vec_add(ac_e, bc_e);  in single_butterfly()
    30   const int32x4_t sum_o = vec_add(ac_o, bc_o);  in single_butterfly()
    35   const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding);  in single_butterfly()
    36   const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding);  in single_butterfly()
    37   const int32x4_t rdiff_o = vec_add(diff_o, vec_dct_const_rounding);  in single_butterfly()
    38   const int32x4_t rdiff_e = vec_add(diff_e, vec_dct_const_rounding);  in single_butterfly()
    64   const int32x4_t sum_o = vec_add(ac1_o, bc2_o);  in double_butterfly()
    65   const int32x4_t sum_e = vec_add(ac1_e, bc2_e);  in double_butterfly()
    70   const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding);  in double_butterfly()
    71   const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding);  in double_butterfly()
    [all …]
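Every match above feeds the same fixed-point convention: butterfly outputs are biased with vec_dct_const_rounding and then shifted down by DCT_CONST_BITS (14 in libvpx) so the 32-bit cosine products return to the transform's working scale. A minimal sketch of that round-and-shift step, with the splatted constants written out inline (dct_round_shift is an illustrative name, not the file's):

    #include <altivec.h>

    typedef vector signed int int32x4_t;

    #define DCT_CONST_BITS 14

    /* (x + 2^13) >> 14 per lane: the bias is half the divisor, so the
       arithmetic shift rounds to nearest. */
    static int32x4_t dct_round_shift(int32x4_t x) {
      const int32x4_t rounding = vec_splats(1 << (DCT_CONST_BITS - 1));
      const vector unsigned int bits = vec_splats((unsigned int)DCT_CONST_BITS);
      return vec_sra(vec_add(x, rounding), bits);
    }

The jfdctint/jidctint descale adds further down this listing (pd_descale_p##PASS, pw_descale_p2x) are the same idea with pass-dependent shift widths.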
|
D | inv_txfm_vsx.c |
    145  #define DCT_CONST_ROUND_SHIFT(vec) vec = vec_sra(vec_add(vec, shift), shift14);
    151  #define PIXEL_ADD4(out, in) out = vec_sra(vec_add(in, add8), shift4);
    154  t0 = vec_add(in0, in1); \
    157  temp1 = vec_sra(vec_add(vec_mule(tmp16_0, cospi16_v), shift), shift14); \
    158  temp2 = vec_sra(vec_add(vec_mulo(tmp16_0, cospi16_v), shift), shift14); \
    163  temp4 = vec_add(vec_mule(tmp16_0, cospi8_v), vec_mulo(tmp16_0, cospi24_v)); \
    168  out0 = vec_add(step0, step1); \
    173  tmp16_0 = vec_add(vec_perm(d_u0, d_u1, tr8_mask0), v0); \
    174  tmp16_1 = vec_add(vec_perm(d_u2, d_u3, tr8_mask0), v1); \
    271  temp10 = vec_add(vec_mule(tmp16_0, cospi1), vec_mulo(tmp16_0, cospi0)); \
    [all …]
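DCT_CONST_ROUND_SHIFT pairs with widening multiplies in these macros: vec_mule and vec_mulo multiply the even and odd 16-bit lanes into full 32-bit products before the round/shift. A sketch of one such pair, assuming the same shift (2^13 rounding bias) and shift14 constants the file defines; mul_round_shift is an illustrative name:

    #include <altivec.h>

    /* Widen-multiply x by a cosine constant: even lanes via vec_mule,
       odd lanes via vec_mulo, each rounded and shifted down 14 bits. */
    static void mul_round_shift(vector signed short x,
                                vector signed short cospi,
                                vector signed int *even,
                                vector signed int *odd) {
      const vector signed int shift = vec_splats(1 << 13);
      const vector unsigned int shift14 = vec_splats(14u);
      *even = vec_sra(vec_add(vec_mule(x, cospi), shift), shift14);
      *odd  = vec_sra(vec_add(vec_mulo(x, cospi), shift), shift14);
    }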
|
D | hadamard_vsx.c |
    17   const int16x8_t b0 = vec_add(v[0], v[1]);  in vpx_hadamard_s16_8x8_one_pass()
    19   const int16x8_t b2 = vec_add(v[2], v[3]);  in vpx_hadamard_s16_8x8_one_pass()
    21   const int16x8_t b4 = vec_add(v[4], v[5]);  in vpx_hadamard_s16_8x8_one_pass()
    23   const int16x8_t b6 = vec_add(v[6], v[7]);  in vpx_hadamard_s16_8x8_one_pass()
    26   const int16x8_t c0 = vec_add(b0, b2);  in vpx_hadamard_s16_8x8_one_pass()
    27   const int16x8_t c1 = vec_add(b1, b3);  in vpx_hadamard_s16_8x8_one_pass()
    30   const int16x8_t c4 = vec_add(b4, b6);  in vpx_hadamard_s16_8x8_one_pass()
    31   const int16x8_t c5 = vec_add(b5, b7);  in vpx_hadamard_s16_8x8_one_pass()
    35   v[0] = vec_add(c0, c4);  in vpx_hadamard_s16_8x8_one_pass()
    38   v[3] = vec_add(c2, c6);  in vpx_hadamard_s16_8x8_one_pass()
    [all …]
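The whole one-pass 8x8 Hadamard is three rounds of this pairwise butterfly; the elided odd-numbered lines hold the matching vec_sub differences. A sketch of the first round (hadamard_round is an illustrative name; the later rounds only change which rows pair up):

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* Round 1: rows (0,1), (2,3), (4,5), (6,7) become sum/difference
       pairs. Each round grows magnitudes by one bit, which 16-bit
       lanes absorb since the inputs are small pixel differences. */
    static void hadamard_round(const int16x8_t v[8], int16x8_t b[8]) {
      b[0] = vec_add(v[0], v[1]);
      b[1] = vec_sub(v[0], v[1]);
      b[2] = vec_add(v[2], v[3]);
      b[3] = vec_sub(v[2], v[3]);
      b[4] = vec_add(v[4], v[5]);
      b[5] = vec_sub(v[4], v[5]);
      b[6] = vec_add(v[6], v[7]);
      b[7] = vec_sub(v[6], v[7]);
    }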
|
D | deblock_vsx.c |
    132  const int16x8_t sum1 = vec_add(x, vec_slo(x, vec_splats((int8_t)(2 << 3))));  in slide_sum_s16()
    134  const int16x8_t sum2 = vec_add(vec_slo(x, vec_splats((int8_t)(4 << 3))),  in slide_sum_s16()
    138  const int16x8_t sum3 = vec_add(vec_slo(x, vec_splats((int8_t)(8 << 3))),  in slide_sum_s16()
    142  const int16x8_t sum4 = vec_add(vec_slo(x, vec_splats((int8_t)(12 << 3))),  in slide_sum_s16()
    145  return vec_add(vec_add(sum1, sum2), vec_add(sum3, sum4));  in slide_sum_s16()
    152  int32x4_t sumsq_1 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(4 << 3))),  in slide_sumsq_s32()
    156  int32x4_t sumsq_2 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(8 << 3))),  in slide_sumsq_s32()
    160  int32x4_t sumsq_3 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(12 << 3))),  in slide_sumsq_s32()
    162  sumsq_1 = vec_add(sumsq_1, xsq_even);  in slide_sumsq_s32()
    163  sumsq_2 = vec_add(sumsq_2, sumsq_3);  in slide_sumsq_s32()
    [all …]
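The (n << 3) splats look odd until you know vec_slo's encoding: it shifts the whole 128-bit register left by bytes, reading the count from bit positions that make an n-byte shift appear as the value n << 3. A sketch of sliding by one 16-bit lane (slide_left_one_lane is an illustrative name):

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* Slide the register left by one 16-bit lane. The 2-byte shift is
       encoded as 2 << 3 = 16 because vec_slo reads its count in bits,
       truncated to whole octets. */
    static int16x8_t slide_left_one_lane(int16x8_t x) {
      return vec_slo(x, vec_splats((signed char)(2 << 3)));
    }

Chaining these slides and adding, as slide_sum_s16() does, builds the deblocker's sliding-window sums without any horizontal-add instruction.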
|
D | intrapred_vsx.c |
    246  val = vec_sub(vec_add(vec_splat(l, 0), a), tl);  in vpx_tm_predictor_4x4_vsx()
    252  val = vec_sub(vec_add(vec_splat(l, 1), a), tl);  in vpx_tm_predictor_4x4_vsx()
    258  val = vec_sub(vec_add(vec_splat(l, 2), a), tl);  in vpx_tm_predictor_4x4_vsx()
    264  val = vec_sub(vec_add(vec_splat(l, 3), a), tl);  in vpx_tm_predictor_4x4_vsx()
    276  val = vec_sub(vec_add(vec_splat(l, 0), a), tl);  in vpx_tm_predictor_8x8_vsx()
    281  val = vec_sub(vec_add(vec_splat(l, 1), a), tl);  in vpx_tm_predictor_8x8_vsx()
    286  val = vec_sub(vec_add(vec_splat(l, 2), a), tl);  in vpx_tm_predictor_8x8_vsx()
    291  val = vec_sub(vec_add(vec_splat(l, 3), a), tl);  in vpx_tm_predictor_8x8_vsx()
    296  val = vec_sub(vec_add(vec_splat(l, 4), a), tl);  in vpx_tm_predictor_8x8_vsx()
    301  val = vec_sub(vec_add(vec_splat(l, 5), a), tl);  in vpx_tm_predictor_8x8_vsx()
    [all …]
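TM (TrueMotion) prediction fills each row with left + above − top_left; vec_splat(l, i) broadcasts row i's left pixel so one vec_add/vec_sub pair produces a whole row. A sketch of the per-row step, assuming inputs already widened to 16 bits (tm_predict_row is an illustrative name; the real routines pack back to 8 bits with saturation afterwards):

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* pred[c] = left_r + above[c] - top_left, where left_r is the
       row's left pixel splatted across all lanes (vec_splat(l, r)).
       16-bit intermediates keep the 8-bit +/- math from overflowing. */
    static int16x8_t tm_predict_row(int16x8_t left_r, int16x8_t above,
                                    int16x8_t top_left) {
      return vec_sub(vec_add(left_r, above), top_left);
    }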
|
D | quantize_vsx.c |
    20   return vec_xor(vec_add(a, mask), mask);  in vec_sign()
    44   qcoeff = vec_add(qcoeff, rounded);  in quantize_coeff()
    57   qcoeff = vec_add(qcoeff, rounded);  in quantize_coeff_32()
    74   dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe));  in dequantize_coeff_32()
    75   dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo));  in dequantize_coeff_32()
    235  zbin = vec_sra(vec_add(zbin, vec_ones_s16), vec_ones_u16);  in vpx_quantize_b_32x32_vsx()
    236  round = vec_sra(vec_add(round, vec_ones_s16), vec_ones_u16);  in vpx_quantize_b_32x32_vsx()
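The line in vec_sign() is the standard branch-free conditional negate; the mask on the preceding (elided) line is b shifted right arithmetically by 15, i.e. all ones exactly where b is negative. vp9_quantize_vsx.c further down carries an identical helper. A sketch with the mask derivation written out:

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* Give a the sign of b: where b < 0 the mask is 0xFFFF and
       (a - 1) ^ 0xFFFF == -a in two's complement; where b >= 0 the
       mask is 0 and a passes through unchanged. */
    static int16x8_t vec_sign(int16x8_t a, int16x8_t b) {
      const int16x8_t mask = vec_sra(b, vec_splats((unsigned short)15));
      return vec_xor(vec_add(a, mask), mask);
    }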
|
/external/libjpeg-turbo/simd/powerpc/ |
D | jfdctfst-altivec.c |
    48   tmp10 = vec_add(tmp0, tmp3); \
    50   tmp11 = vec_add(tmp1, tmp2); \
    53   out0 = vec_add(tmp10, tmp11); \
    56   z1 = vec_add(tmp12, tmp13); \
    60   out2 = vec_add(tmp13, z1); \
    65   tmp10 = vec_add(tmp4, tmp5); \
    66   tmp11 = vec_add(tmp5, tmp6); \
    67   tmp12 = vec_add(tmp6, tmp7); \
    80   z11 = vec_add(tmp7, z3); \
    83   out5 = vec_add(z13, z2); \
    [all …]
|
D | jdsample-altivec.c |
    94   last0l = vec_add(last0l, pw_one);  in jsimd_h2v1_fancy_upsample_altivec()
    98   next0l = vec_add(next0l, pw_two);  in jsimd_h2v1_fancy_upsample_altivec()
    100  outle = vec_add(this0l, last0l);  in jsimd_h2v1_fancy_upsample_altivec()
    101  outlo = vec_add(this0l, next0l);  in jsimd_h2v1_fancy_upsample_altivec()
    110  last0h = vec_add(last0h, pw_one);  in jsimd_h2v1_fancy_upsample_altivec()
    111  next0h = vec_add(next0h, pw_two);  in jsimd_h2v1_fancy_upsample_altivec()
    113  outhe = vec_add(this0h, last0h);  in jsimd_h2v1_fancy_upsample_altivec()
    114  outho = vec_add(this0h, next0h);  in jsimd_h2v1_fancy_upsample_altivec()
    192  thiscolsum_1l = vec_add(this0l, this_1l);  in jsimd_h2v2_fancy_upsample_altivec()
    193  thiscolsum_1h = vec_add(this0h, this_1h);  in jsimd_h2v2_fancy_upsample_altivec()
    [all …]
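The pw_one/pw_two biases implement libjpeg's "fancy" (triangle-filter) h2v1 upsampling: each input sample s produces two outputs, (3*s + prev + 1) >> 2 and (3*s + next + 2) >> 2, and the unequal biases make the truncations round in opposite directions so the image doesn't drift. A sketch of one output pair, assuming this3 already holds 3*s as the elided lines compute (fancy_h2v1_pair is an illustrative name):

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* even = (3*s + prev + 1) >> 2, odd = (3*s + next + 2) >> 2 */
    static void fancy_h2v1_pair(int16x8_t this3, int16x8_t prev,
                                int16x8_t next,
                                int16x8_t *even, int16x8_t *odd) {
      const int16x8_t pw_one = vec_splats((short)1);
      const int16x8_t pw_two = vec_splats((short)2);
      const vector unsigned short two = vec_splats((unsigned short)2);
      *even = vec_sra(vec_add(vec_add(this3, prev), pw_one), two);
      *odd  = vec_sra(vec_add(vec_add(this3, next), pw_two), two);
    }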
|
D | jidctfst-altivec.c |
    50   tmp10 = vec_add(in##0, in##4); \
    52   tmp13 = vec_add(in##2, in##6); \
    59   tmp0 = vec_add(tmp10, tmp13); \
    61   tmp1 = vec_add(tmp11, tmp12); \
    66   z13 = vec_add(in##5, in##3); \
    69   z11 = vec_add(in##1, in##7); \
    77   tmp7 = vec_add(z11, z13); \
    89   z5 = vec_add(z10s, z12s); \
    99   tmp4 = vec_add(tmp10, tmp5); \
    101  out0 = vec_add(tmp0, tmp7); \
    [all …]
|
D | jidctint-altivec.c |
    68   tmp0 = vec_add(in##0, in##4); \
    75   tmp0l = vec_add(tmp0l, pd_descale_p##PASS); \
    76   tmp0h = vec_add(tmp0h, pd_descale_p##PASS); \
    78   tmp10l = vec_add(tmp0l, tmp3l); \
    79   tmp10h = vec_add(tmp0h, tmp3h); \
    87   tmp1l = vec_add(tmp1l, pd_descale_p##PASS); \
    88   tmp1h = vec_add(tmp1h, pd_descale_p##PASS); \
    90   tmp11l = vec_add(tmp1l, tmp2l); \
    91   tmp11h = vec_add(tmp1h, tmp2h); \
    97   z3 = vec_add(in##3, in##7); \
    [all …]
|
D | jdmrgext-altivec.c |
    113  b_yl = vec_add(cbl, cbl);  in jsimd_h2v1_merged_upsample_altivec()
    114  b_yh = vec_add(cbh, cbh);  in jsimd_h2v1_merged_upsample_altivec()
    119  b_yl = vec_add(b_yl, cbl);  in jsimd_h2v1_merged_upsample_altivec()
    120  b_yh = vec_add(b_yh, cbh);  in jsimd_h2v1_merged_upsample_altivec()
    121  b_yl = vec_add(b_yl, cbl);  in jsimd_h2v1_merged_upsample_altivec()
    122  b_yh = vec_add(b_yh, cbh);  in jsimd_h2v1_merged_upsample_altivec()
    124  r_yl = vec_add(crl, crl);  in jsimd_h2v1_merged_upsample_altivec()
    125  r_yh = vec_add(crh, crh);  in jsimd_h2v1_merged_upsample_altivec()
    130  r_yl = vec_add(r_yl, crl);  in jsimd_h2v1_merged_upsample_altivec()
    131  r_yh = vec_add(r_yh, crh);  in jsimd_h2v1_merged_upsample_altivec()
    [all …]
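The doubled-then-re-added chroma here is how the constant 1.772 in B = Y + 1.772*Cb gets decomposed: 1.772 = 2 − 0.228, so only the small fraction needs a 16-bit multiply while the integer part arrives as two plain adds (jdcolext-altivec.c below does the same). A sketch of the blue channel under that decomposition; the Q15 constant −7471 ≈ −0.228·2^15 is an assumption of this sketch, not necessarily the file's exact scaling:

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* B = Y + 1.772*Cb, split as Y - 0.228*Cb + 2*Cb. vec_madds is a
       rounded, saturating ((a*b) >> 15) + c; feeding it 2*Cb and then
       halving (the +1 from pw_one acting as rounding bias) yields
       -0.228*Cb with one extra bit of precision. */
    static int16x8_t ycc_to_blue(int16x8_t y, int16x8_t cb) {
      const int16x8_t mf0228 = vec_splats((short)-7471); /* assumed Q15 */
      const int16x8_t pw_one = vec_splats((short)1);
      int16x8_t b = vec_add(cb, cb);                 /* 2*Cb          */
      b = vec_madds(b, mf0228, pw_one);              /* -0.456*Cb + 1 */
      b = vec_sra(b, vec_splats((unsigned short)1)); /* -0.228*Cb     */
      b = vec_add(b, cb);
      b = vec_add(b, cb);                            /* + 2*Cb        */
      return vec_add(b, y);                          /* + Y           */
    }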
|
D | jfdctint-altivec.c |
    76   z3 = vec_add(tmp4, tmp6); \
    77   z4 = vec_add(tmp5, tmp7); \
    150  tmp10 = vec_add(tmp0, tmp3); \
    152  tmp11 = vec_add(tmp1, tmp2); \
    155  out0 = vec_add(tmp10, tmp11); \
    166  tmp10 = vec_add(tmp0, tmp3); \
    168  tmp11 = vec_add(tmp1, tmp2); \
    171  out0 = vec_add(tmp10, tmp11); \
    172  out0 = vec_add(out0, pw_descale_p2x); \
    175  out4 = vec_add(out4, pw_descale_p2x); \
    [all …]
|
D | jcsample-altivec.c |
    64   outl = vec_add(this0e, this0o);  in jsimd_h2v1_downsample_altivec()
    65   outl = vec_add(outl, pw_bias);  in jsimd_h2v1_downsample_altivec()
    73   outh = vec_add(next0e, next0o);  in jsimd_h2v1_downsample_altivec()
    74   outh = vec_add(outh, pw_bias);  in jsimd_h2v1_downsample_altivec()
    124  out0l = vec_add(this0e, this0o);  in jsimd_h2v2_downsample_altivec()
    130  out1l = vec_add(this1e, this1o);  in jsimd_h2v2_downsample_altivec()
    132  outl = vec_add(out0l, out1l);  in jsimd_h2v2_downsample_altivec()
    133  outl = vec_add(outl, pw_bias);  in jsimd_h2v2_downsample_altivec()
    141  out0h = vec_add(next0e, next0o);  in jsimd_h2v2_downsample_altivec()
    147  out1h = vec_add(next1e, next1o);  in jsimd_h2v2_downsample_altivec()
    [all …]
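Deinterleaving the row into even/odd lanes turns the neighbor-pair sum into one vec_add, and pw_bias alternates 0,1,0,1,... across lanes so the averaging shift rounds alternately down and up rather than always truncating toward darker values. A sketch of the h2v1 core (downsample_h2v1 is an illustrative name):

    #include <altivec.h>

    typedef vector signed short int16x8_t;

    /* (even + odd + bias) >> 1 with an alternating 0/1 rounding bias. */
    static int16x8_t downsample_h2v1(int16x8_t even, int16x8_t odd) {
      const int16x8_t pw_bias = { 0, 1, 0, 1, 0, 1, 0, 1 };
      const vector unsigned short one = vec_splats((unsigned short)1);
      return vec_sra(vec_add(vec_add(even, odd), pw_bias), one);
    }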
|
D | jdcolext-altivec.c |
    111  bl = vec_add(cbl, cbl);  in jsimd_ycc_rgb_convert_altivec()
    112  bh = vec_add(cbh, cbh);  in jsimd_ycc_rgb_convert_altivec()
    117  bl = vec_add(bl, cbl);  in jsimd_ycc_rgb_convert_altivec()
    118  bh = vec_add(bh, cbh);  in jsimd_ycc_rgb_convert_altivec()
    119  bl = vec_add(bl, cbl);  in jsimd_ycc_rgb_convert_altivec()
    120  bh = vec_add(bh, cbh);  in jsimd_ycc_rgb_convert_altivec()
    121  bl = vec_add(bl, yl);  in jsimd_ycc_rgb_convert_altivec()
    122  bh = vec_add(bh, yh);  in jsimd_ycc_rgb_convert_altivec()
    124  rl = vec_add(crl, crl);  in jsimd_ycc_rgb_convert_altivec()
    125  rh = vec_add(crh, crh);  in jsimd_ycc_rgb_convert_altivec()
    [all …]
|
D | jquanti-altivec.c |
    180  row0 = vec_add(row0, corr0);  in jsimd_quantize_altivec()
    181  row1 = vec_add(row1, corr1);  in jsimd_quantize_altivec()
    182  row2 = vec_add(row2, corr2);  in jsimd_quantize_altivec()
    183  row3 = vec_add(row3, corr3);  in jsimd_quantize_altivec()
    184  row4 = vec_add(row4, corr4);  in jsimd_quantize_altivec()
    185  row5 = vec_add(row5, corr5);  in jsimd_quantize_altivec()
    186  row6 = vec_add(row6, corr6);  in jsimd_quantize_altivec()
    187  row7 = vec_add(row7, corr7);  in jsimd_quantize_altivec()
|
/external/fec/ |
D | sumsq_av.c |
    40   carries = vec_add(carries,vec_addc(sums,s1));  in sumsq_av()
    41   sums = vec_add(sums,s1);  in sumsq_av()
    51   carries = vec_add(carries,vec_addc(sums,s1));  in sumsq_av()
    52   sums = vec_add(sums,s1);  in sumsq_av()
    58   carries = vec_add(carries,vec_addc(sums,s1));  in sumsq_av()
    59   sums = vec_add(sums,s1);  in sumsq_av()
    60   carries = vec_add(carries,s2);  in sumsq_av()
    65   carries = vec_add(carries,vec_addc(sums,s1));  in sumsq_av()
    66   sums = vec_add(sums,s1);  in sumsq_av()
    67   carries = vec_add(carries,s2);  in sumsq_av()
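The sums/carries pair is extended-precision accumulation: vec_addc returns each unsigned 32-bit lane's carry-out (0 or 1), so folding those into a second vector turns four 32-bit accumulators into four wider ones. Note the ordering: the carry is computed against sums before sums is updated. A sketch (accumulate_with_carry is an illustrative name):

    #include <altivec.h>

    /* Add s1 into a (carries:sums) accumulator pair; vec_addc captures
       the per-lane carry-out of the 32-bit add. */
    static void accumulate_with_carry(vector unsigned int *sums,
                                      vector unsigned int *carries,
                                      vector unsigned int s1) {
      *carries = vec_add(*carries, vec_addc(*sums, s1)); /* old sums */
      *sums = vec_add(*sums, s1);
    }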
|
D | viterbi615_av.c |
    143  m0 = vec_add(vec_xor(Branchtab615[0].v[i],sym0v),vec_xor(Branchtab615[1].v[i],sym1v));  in update_viterbi615_blk_av()
    144  m1 = vec_add(vec_xor(Branchtab615[2].v[i],sym2v),vec_xor(Branchtab615[3].v[i],sym3v));  in update_viterbi615_blk_av()
    145  m2 = vec_add(vec_xor(Branchtab615[4].v[i],sym4v),vec_xor(Branchtab615[5].v[i],sym5v));  in update_viterbi615_blk_av()
    146  metric = vec_add(m0,m1);  in update_viterbi615_blk_av()
    147  metric = vec_add(metric,m2);  in update_viterbi615_blk_av()
    166  decisions = vec_add(decisions,decisions); /* Shift each byte 1 bit to the left */  in update_viterbi615_blk_av()
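Two idioms sit in these matches: the branch metrics are sums of per-symbol XOR distances against the branch tables, and decisions = vec_add(decisions, decisions) doubles every byte, i.e. shifts each byte left one bit with carries stopped at lane boundaries, which is how one decision bit per step gets packed (viterbi39_av.c below uses the same idiom). A sketch of the shift trick:

    #include <altivec.h>

    /* Shift each byte lane left by one bit: doubling wraps per lane,
       so no bit ever crosses into a neighboring byte, unlike a
       whole-register shift. */
    static vector unsigned char shl1_per_byte(vector unsigned char v) {
      return vec_add(v, v);
    }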
|
D | viterbi39_av.c |
    140  m0 = vec_add(vec_xor(Branchtab39[0].v[i],sym0v),vec_xor(Branchtab39[1].v[i],sym1v));  in update_viterbi39_blk_av()
    142  metric = vec_add(m0,m1);  in update_viterbi39_blk_av()
    161  decisions = vec_add(decisions,decisions); /* Shift each byte 1 bit to the left */  in update_viterbi39_blk_av()
|
/external/libpng/powerpc/ |
D | filter_vsx_intrinsics.c |
    80   rp_vec = vec_add(rp_vec,pp_vec);  in png_read_filter_row_up_vsx()
    207  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub4_vsx()
    210  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub4_vsx()
    213  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub4_vsx()
    264  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub3_vsx()
    267  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub3_vsx()
    270  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub3_vsx()
    273  rp_vec = vec_add(rp_vec,part_vec);  in png_read_filter_row_sub3_vsx()
    350  rp_vec = vec_add(rp_vec,avg_vec);  in png_read_filter_row_avg4_vsx()
    356  rp_vec = vec_add(rp_vec,avg_vec);  in png_read_filter_row_avg4_vsx()
    [all …]
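The single add in png_read_filter_row_up_vsx is the whole Up filter: PNG defines Recon(x) = Filt(x) + Recon(b) mod 256, and AltiVec byte adds wrap per lane, which is exactly that arithmetic, 16 bytes at a time. A sketch of the per-vector step:

    #include <altivec.h>

    /* PNG "Up" reconstruction: add the byte directly above, mod 256. */
    static vector unsigned char png_filter_up(vector unsigned char row,
                                              vector unsigned char prior) {
      return vec_add(row, prior);
    }

The sub3/sub4 variants reuse the same wrapping add, but feed it the just-reconstructed bytes shifted over by the pixel width, hence the repeated part_vec adds.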
|
/external/clang/test/CodeGen/ |
D | builtins-ppc-quadword.c |
    28   res_vlll = vec_add(vlll, vlll);  in test1()
    33   res_vulll = vec_add(vulll, vulll);  in test1()
|
D | builtins-ppc-altivec.c |
    111  res_vsc = vec_add(vsc, vsc);  in test1()
    115  res_vsc = vec_add(vbc, vsc);  in test1()
    119  res_vsc = vec_add(vsc, vbc);  in test1()
    123  res_vuc = vec_add(vuc, vuc);  in test1()
    127  res_vuc = vec_add(vbc, vuc);  in test1()
    131  res_vuc = vec_add(vuc, vbc);  in test1()
    135  res_vs = vec_add(vs, vs);  in test1()
    139  res_vs = vec_add(vbs, vs);  in test1()
    143  res_vs = vec_add(vs, vbs);  in test1()
    147  res_vus = vec_add(vus, vus);  in test1()
    [all …]
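These CodeGen tests enumerate vec_add's overload set: every element width, signed/unsigned/bool operand mixes (the bool vector adopts the other operand's type), and, in builtins-ppc-quadword.c above, vector __int128. A short sketch of the mixes being checked:

    #include <altivec.h>

    /* Overload resolution: bool vectors combine with signed or unsigned
       vectors of the same width; the result takes the non-bool type. */
    void vec_add_overload_demo(void) {
      vector signed char vsc = vec_splats((signed char)1);
      vector bool char vbc = (vector bool char)vec_splats((unsigned char)0);
      vector unsigned short vus = vec_splats((unsigned short)2);

      vector signed char r0 = vec_add(vsc, vsc);    /* signed + signed */
      vector signed char r1 = vec_add(vbc, vsc);    /* bool + signed   */
      vector signed char r2 = vec_add(vsc, vbc);    /* signed + bool   */
      vector unsigned short r3 = vec_add(vus, vus); /* wider lanes     */
      (void)r0; (void)r1; (void)r2; (void)r3;
    }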
|
/external/libaom/libaom/av1/common/ppc/ |
D | cfl_ppc.c |
    67   int32x4_t sum_32x4 = vec_add(sum_32x4_0, sum_32x4_1);  in subtract_average_vsx()
    70   sum_32x4 = vec_add(sum_32x4, perm_64);  in subtract_average_vsx()
    72   sum_32x4 = vec_add(sum_32x4, perm_32);  in subtract_average_vsx()
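subtract_average_vsx reduces the block sum horizontally by folding: add the register to a copy of itself rotated by half its width, then by a quarter, after which every lane holds the total. The file builds those rotations with vec_perm masks (perm_64, perm_32); vec_sld expresses the same folds more compactly, as in this sketch:

    #include <altivec.h>

    /* Horizontal sum of four int32 lanes in two fold steps; the total
       ends up broadcast to every lane, ready for the average divide. */
    static vector signed int horizontal_sum(vector signed int v) {
      v = vec_add(v, vec_sld(v, v, 8)); /* fold 64-bit halves  */
      v = vec_add(v, vec_sld(v, v, 4)); /* fold remaining pair */
      return v;
    }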
|
/external/libunwind/tests/ |
D | ppc64-test-altivec.c |
    130  vec_nor (v1, vec_add (v2, vec_sub (v3, vec_and (v4, vec_or (v5, v6)))));  in vec_stack()
    144  vec_add (v2,  in vec_stack()
|
/external/libvpx/libvpx/vp9/encoder/ppc/ |
D | vp9_quantize_vsx.c |
    30   return vec_xor(vec_add(a, mask), mask);  in vec_sign()
    160  dqcoeffe = vec_add(dqcoeffe, vec_is_neg(dqcoeffe));  in dequantize_coeff_32()
    161  dqcoeffo = vec_add(dqcoeffo, vec_is_neg(dqcoeffo));  in dequantize_coeff_32()
    202  round = vec_sra(vec_add(round, vec_ones_s16), vec_ones_u16);  in vp9_quantize_fp_32x32_vsx()
|
/external/eigen/Eigen/src/Core/arch/AltiVec/ |
D | MathFunctions.h |
    189  emm0 = vec_add(emm0, p4i_0x7f);
    287  emm0 = vec_add(emm0, p2l_1023);
    297  emm04i = vec_add(emm04i, p4i_1023);
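In Eigen's vectorized exp() these adds apply the IEEE-754 exponent bias, 0x7f for float and 1023 for double, to the integer part n of x/ln 2 before it is shifted into the exponent field, constructing 2^n for the final scaling multiply. A sketch of the single-precision case (pow2_float is an illustrative name):

    #include <altivec.h>

    /* Build 2^n per lane by biasing the exponent and shifting it into
       bits 30..23 of a float's bit pattern; valid for normal-range n. */
    static vector float pow2_float(vector signed int n) {
      const vector signed int bias = vec_splats(0x7f);
      const vector unsigned int mant_bits = vec_splats(23u);
      return (vector float)vec_sl(vec_add(n, bias), mant_bits);
    }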
|