
Searched refs:res0 (Results 1 – 25 of 149) sorted by relevance

/external/libaom/libaom/aom_dsp/x86/
fwd_txfm_sse2.h
68 int res0, res1; in check_epi16_overflow_x8() local
69 res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); in check_epi16_overflow_x8()
71 return res0 + res1; in check_epi16_overflow_x8()
79 int res0, res1; in check_epi16_overflow_x12() local
80 res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); in check_epi16_overflow_x12()
82 if (!res0) res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11); in check_epi16_overflow_x12()
83 return res0 + res1; in check_epi16_overflow_x12()
93 int res0, res1; in check_epi16_overflow_x16() local
94 res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); in check_epi16_overflow_x16()
96 if (!res0) { in check_epi16_overflow_x16()
[all …]
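All of these helpers share one idiom: compare every 16-bit lane against INT16_MAX and INT16_MIN and OR the comparison masks together, so a nonzero return flags lanes that may have saturated during the forward transform. A minimal sketch of the x4 building block and the x8 wrapper, assuming the same contract as the header above (not verbatim source):

#include <emmintrin.h> /* SSE2 */

/* Nonzero if any 16-bit lane of the four registers equals INT16_MAX
 * or INT16_MIN, i.e. may have wrapped. */
static int check_epi16_overflow_x4(const __m128i *p0, const __m128i *p1,
                                   const __m128i *p2, const __m128i *p3) {
  const __m128i max16 = _mm_set1_epi16(0x7fff);
  const __m128i min16 = _mm_set1_epi16((short)0x8000);
  __m128i cmp = _mm_or_si128(_mm_cmpeq_epi16(*p0, max16),
                             _mm_cmpeq_epi16(*p0, min16));
  cmp = _mm_or_si128(cmp, _mm_or_si128(_mm_cmpeq_epi16(*p1, max16),
                                       _mm_cmpeq_epi16(*p1, min16)));
  cmp = _mm_or_si128(cmp, _mm_or_si128(_mm_cmpeq_epi16(*p2, max16),
                                       _mm_cmpeq_epi16(*p2, min16)));
  cmp = _mm_or_si128(cmp, _mm_or_si128(_mm_cmpeq_epi16(*p3, max16),
                                       _mm_cmpeq_epi16(*p3, min16)));
  return _mm_movemask_epi8(cmp);
}

/* x8 checks both groups unconditionally and returns res0 + res1; the
 * x12/x16 variants above only test later groups when !res0. */
static int check_epi16_overflow_x8(const __m128i *p0, const __m128i *p1,
                                   const __m128i *p2, const __m128i *p3,
                                   const __m128i *p4, const __m128i *p5,
                                   const __m128i *p6, const __m128i *p7) {
  int res0 = check_epi16_overflow_x4(p0, p1, p2, p3);
  int res1 = check_epi16_overflow_x4(p4, p5, p6, p7);
  return res0 + res1;
}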
masked_sad4d_ssse3.c
49 __m128i res0 = _mm_setzero_si128(); in masked_sadx4d_ssse3() local
81 res0 = _mm_add_epi32(_mm_unpacklo_epi32(res0, res1), in masked_sadx4d_ssse3()
82 _mm_unpackhi_epi32(res0, res1)); in masked_sadx4d_ssse3()
86 res0 = _mm_unpacklo_epi64(res0, res2); in masked_sadx4d_ssse3()
87 _mm_storeu_si128((__m128i *)sad_array, res0); in masked_sadx4d_ssse3()
116 __m128i res0 = _mm_setzero_si128(); in aom_masked_sad8xhx4d_ssse3() local
148 res0 = _mm_add_epi32(_mm_unpacklo_epi32(res0, res1), in aom_masked_sad8xhx4d_ssse3()
149 _mm_unpackhi_epi32(res0, res1)); in aom_masked_sad8xhx4d_ssse3()
152 res0 = _mm_unpacklo_epi64(res0, res2); in aom_masked_sad8xhx4d_ssse3()
153 _mm_storeu_si128((__m128i *)sad_array, res0); in aom_masked_sad8xhx4d_ssse3()
[all …]
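The stores at lines 87 and 153 are preceded by a standard four-accumulator horizontal reduction: interleave and add until lane i of one register holds the complete sum of accumulator i. A sketch of that transpose-and-add step with a hypothetical helper name (the listing elides part of the sequence):

#include <emmintrin.h>
#include <stdint.h>

/* Reduce four accumulators, each holding four partial 32-bit sums,
 * so that lane i of the result is the horizontal sum of res_i. */
static void store_4x_sads(__m128i res0, __m128i res1, __m128i res2,
                          __m128i res3, uint32_t *sad_array) {
  /* after this, res0 = { r0_0+r0_2, r1_0+r1_2, r0_1+r0_3, r1_1+r1_3 } */
  res0 = _mm_add_epi32(_mm_unpacklo_epi32(res0, res1),
                       _mm_unpackhi_epi32(res0, res1));
  res2 = _mm_add_epi32(_mm_unpacklo_epi32(res2, res3),
                       _mm_unpackhi_epi32(res2, res3));
  /* lane i is now the full sum of accumulator i */
  res0 = _mm_add_epi32(_mm_unpacklo_epi64(res0, res2),
                       _mm_unpackhi_epi64(res0, res2));
  _mm_storeu_si128((__m128i *)sad_array, res0);
}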
/external/libvpx/libvpx/vp8/common/mips/msa/
idct_msa.c
91 v4i32 res0, res1, res2, res3; in idct4x4_addblk_msa() local
104 ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1, in idct4x4_addblk_msa()
106 ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2, in idct4x4_addblk_msa()
108 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in idct4x4_addblk_msa()
109 res0 = CLIP_SW_0_255(res0); in idct4x4_addblk_msa()
113 PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1); in idct4x4_addblk_msa()
114 res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1); in idct4x4_addblk_msa()
115 ST4x4_UB(res0, res0, 3, 2, 1, 0, dest, dest_stride); in idct4x4_addblk_msa()
121 v8i16 vec, res0, res1, res2, res3, dst0, dst1; in idct4x4_addconst_msa() local
128 ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1, in idct4x4_addconst_msa()
[all …]
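Behind the MSA macros (ILVR_* to widen, ADD4, CLIP_SW_0_255, PCKEV_* to repack) is a plain add-residual-and-saturate step. A scalar C equivalent, assuming vt holds the 4x4 inverse-transform output:

#include <stdint.h>

/* Add the IDCT residual to the prediction and clip to [0, 255];
 * this is what the vector code above does 16 pixels at a time. */
static void idct4x4_addblk_scalar(const int32_t vt[4][4], uint8_t *dest,
                                  int dest_stride) {
  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      int32_t v = dest[i * dest_stride + j] + vt[i][j];
      dest[i * dest_stride + j] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }
  }
}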
/external/llvm/test/CodeGen/X86/
vector-shuffle-combining-avx512bw.ll
24 …%res0 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 7,…
25 …permvar.df.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, …
38 …%res0 = call <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double> %x0, <8 x i64> <i64 7,…
39 …permvar.df.512(<8 x double> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, …
47 …%res0 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 7, i64 6…
48 …ask.permvar.di.512(<8 x i64> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1,…
61 …%res0 = call <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64> %x0, <8 x i64> <i64 7, i64 6…
62 …ask.permvar.di.512(<8 x i64> %res0, <8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1,…
70 …%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5,…
71 …i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 -1)
[all …]
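These tests chain two permvar/vpermt2var calls and verify that the shuffle combiner folds the pair into a single shuffle. At source level, one maskz vpermt2var step is the AVX-512F two-source permute intrinsic; a sketch with a made-up wrapper name:

#include <immintrin.h> /* AVX-512F */

/* idx lane i picks one of the 16 concatenated double lanes of a and b;
 * lanes with a zero mask bit are zeroed, matching maskz.vpermt2var. */
__m512d permute2_maskz(__mmask8 k, __m512d a, __m512i idx, __m512d b) {
  return _mm512_maskz_permutex2var_pd(k, a, idx, b);
}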
vector-shuffle-combining-xop.ll
18 …%res0 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a1, <2 x double> %a0, <2 x i64> <…
19 …%res1 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %res0, <2 x double> undef, <2 x i6…
28 …%res0 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a1, <4 x double> %a0, <4 x i6…
29 …%res1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %res0, <4 x double> undef, <4 …
38 …%res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a1, <4 x float> %a0, <4 x i32> <i32…
39 …%res1 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %res0, <4 x float> undef, <4 x i32> …
48 …%res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a1, <8 x float> %a0, <8 x i32> …
49 …%res1 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %res0, <8 x float> undef, <8 x i…
58 …%res0 = call <8 x float> @llvm.x86.xop.vpermil2ps.256(<8 x float> %a1, <8 x float> %a0, <8 x i32> …
59 ret <8 x float> %res0
[all …]
avx512bwvl-intrinsics-fast-isel.ll
18 %res0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <16 x i32> zeroinitializer
19 %res1 = bitcast <16 x i8> %res0 to <2 x i64>
39 %res0 = shufflevector <16 x i8> %arg2, <16 x i8> undef, <16 x i32> zeroinitializer
40 %res1 = select <16 x i1> %arg1, <16 x i8> %res0, <16 x i8> %arg0
60 %res0 = shufflevector <16 x i8> %arg1, <16 x i8> undef, <16 x i32> zeroinitializer
61 %res1 = select <16 x i1> %arg0, <16 x i8> %res0, <16 x i8> zeroinitializer
77 %res0 = shufflevector <16 x i8> %arg0, <16 x i8> undef, <32 x i32> zeroinitializer
78 %res1 = bitcast <32 x i8> %res0 to <4 x i64>
98 %res0 = shufflevector <16 x i8> %arg2, <16 x i8> undef, <32 x i32> zeroinitializer
99 %res1 = select <32 x i1> %arg1, <32 x i8> %res0, <32 x i8> %arg0
[all …]
avx512-intrinsics-fast-isel.ll
18 %res0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <16 x i32> zeroinitializer
19 %res1 = bitcast <16 x i32> %res0 to <8 x i64>
39 %res0 = shufflevector <4 x i32> %arg2, <4 x i32> undef, <16 x i32> zeroinitializer
40 %res1 = select <16 x i1> %arg1, <16 x i32> %res0, <16 x i32> %arg0
60 %res0 = shufflevector <4 x i32> %arg1, <4 x i32> undef, <16 x i32> zeroinitializer
61 %res1 = select <16 x i1> %arg0, <16 x i32> %res0, <16 x i32> zeroinitializer
94 %res0 = shufflevector <2 x i64> %a2, <2 x i64> undef, <8 x i32> zeroinitializer
95 %res1 = select <8 x i1> %arg1, <8 x i64> %res0, <8 x i64> %a0
113 %res0 = shufflevector <2 x i64> %a1, <2 x i64> undef, <8 x i32> zeroinitializer
114 %res1 = select <8 x i1> %arg0, <8 x i64> %res0, <8 x i64> zeroinitializer
[all …]
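In these fast-isel tests, `shufflevector ... zeroinitializer` is the IR splat idiom behind the set1 intrinsics, and the following `select` applies the write mask. A small C sketch of the source pattern being exercised (the intrinsics are real AVX-512F; the wrapper names are made up):

#include <immintrin.h>

/* %res0 = shufflevector ... zeroinitializer  -> broadcast scalar a
 * %res1 = select %mask, %res0, %src          -> masked blend        */
__m512i splat_masked(__m512i src, __mmask16 k, int a) {
  return _mm512_mask_set1_epi32(src, k, a);
}

__m512i splat_maskz(__mmask16 k, int a) {
  return _mm512_maskz_set1_epi32(k, a); /* zeroes where k bit is 0 */
}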
avx512vl-intrinsics-fast-isel.ll
18 %res0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <4 x i32> zeroinitializer
19 %res1 = bitcast <4 x i32> %res0 to <2 x i64>
50 %res0 = shufflevector <4 x i32> %arg2, <4 x i32> undef, <4 x i32> zeroinitializer
51 %res1 = select <4 x i1> %arg1, <4 x i32> %res0, <4 x i32> %arg0
82 %res0 = shufflevector <4 x i32> %arg1, <4 x i32> undef, <4 x i32> zeroinitializer
83 %res1 = select <4 x i1> %arg0, <4 x i32> %res0, <4 x i32> zeroinitializer
99 %res0 = shufflevector <4 x i32> %arg0, <4 x i32> undef, <8 x i32> zeroinitializer
100 %res1 = bitcast <8 x i32> %res0 to <4 x i64>
120 %res0 = shufflevector <4 x i32> %arg2, <4 x i32> undef, <8 x i32> zeroinitializer
121 %res1 = select <8 x i1> %arg1, <8 x i32> %res0, <8 x i32> %arg0
[all …]
merge-consecutive-loads-128.ll
31 %res0 = insertelement <2 x double> undef, double %val0, i32 0
32 %res1 = insertelement <2 x double> %res0, double %val1, i32 1
56 %res0 = insertelement <2 x i64> undef, i64 %val0, i32 0
57 %res1 = insertelement <2 x i64> %res0, i64 %val1, i32 1
85 %res0 = insertelement <4 x float> undef, float %val0, i32 0
86 %res1 = insertelement <4 x float> %res0, float %val1, i32 1
110 %res0 = insertelement <4 x float> undef, float %val0, i32 0
111 %res1 = insertelement <4 x float> %res0, float 0.0, i32 1
135 %res0 = insertelement <4 x float> undef, float %val0, i32 0
136 %res1 = insertelement <4 x float> %res0, float %val1, i32 1
[all …]
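Here the tests build a vector from adjacent scalar loads via insertelement and check that the backend merges them into one wide load. A C illustration of the pattern (hypothetical function; an optimizing compiler should emit a single 128-bit movupd):

#include <emmintrin.h>

/* Two consecutive double loads built into a <2 x double>; the
 * merge-consecutive-loads tests verify this becomes one load. */
__m128d load_pair(const double *p) {
  double val0 = p[0]; /* %res0 = insertelement undef, %val0, 0 */
  double val1 = p[1]; /* %res1 = insertelement %res0, %val1, 1 */
  return _mm_set_pd(val1, val0);
}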
merge-consecutive-loads-256.ll
68 %res0 = insertelement <4 x double> undef, double %val0, i32 0
69 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
88 %res0 = insertelement <4 x double> undef, double %val0, i32 0
89 %res1 = insertelement <4 x double> %res0, double 0.0, i32 1
108 %res0 = insertelement <4 x double> undef, double %val0, i32 0
109 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
132 %res0 = insertelement <4 x double> zeroinitializer, double %val0, i32 0
133 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
156 %res0 = insertelement <4 x double> undef, double %val0, i32 0
157 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
[all …]
merge-consecutive-loads-512.ll
100 %res0 = insertelement <8 x double> undef, double %val0, i32 0
101 %res1 = insertelement <8 x double> %res0, double %val1, i32 1
129 %res0 = insertelement <8 x double> undef, double %val0, i32 0
130 %res1 = insertelement <8 x double> %res0, double %val1, i32 1
163 %res0 = insertelement <8 x double> undef, double %val0, i32 0
164 %res2 = insertelement <8 x double> %res0, double %val2, i32 2
215 %res0 = insertelement <8 x i64> undef, i64 %val0, i32 0
216 %res1 = insertelement <8 x i64> %res0, i64 %val1, i32 1
250 %res0 = insertelement <8 x i64> undef, i64 %val0, i32 0
251 %res2 = insertelement <8 x i64> %res0, i64 %val2, i32 2
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
sum_squares_msa.c
19 v2i64 res0 = { 0 }; in vpx_sum_squares_2d_i16_msa() local
31 res0 = __msa_hadd_s_d(mul0, mul0); in vpx_sum_squares_2d_i16_msa()
32 res0 += __msa_splati_d(res0, 1); in vpx_sum_squares_2d_i16_msa()
33 ss_res = (uint64_t)__msa_copy_s_d(res0, 0); in vpx_sum_squares_2d_i16_msa()
43 res0 = __msa_hadd_s_d(mul0, mul0); in vpx_sum_squares_2d_i16_msa()
44 res0 += __msa_splati_d(res0, 1); in vpx_sum_squares_2d_i16_msa()
45 ss_res = (uint64_t)__msa_copy_s_d(res0, 0); in vpx_sum_squares_2d_i16_msa()
71 res0 += __msa_hadd_s_d(mul0, mul0); in vpx_sum_squares_2d_i16_msa()
73 res0 += __msa_splati_d(res0, 1); in vpx_sum_squares_2d_i16_msa()
74 ss_res = (uint64_t)__msa_copy_s_d(res0, 0); in vpx_sum_squares_2d_i16_msa()
[all …]
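The MSA kernel squares eight samples at a time, widens the products into two 64-bit lanes with __msa_hadd_s_d, then folds the lanes (splati_d) and extracts the result (copy_s_d). A scalar equivalent of the whole computation, assuming a size x size block:

#include <stdint.h>

/* Sum of squares over a size x size block of int16_t samples,
 * accumulated in 64 bits; the vector code above must match this. */
static uint64_t sum_squares_2d_i16(const int16_t *src, int stride,
                                   int size) {
  uint64_t ss = 0;
  for (int r = 0; r < size; ++r)
    for (int c = 0; c < size; ++c) {
      const int32_t v = src[r * stride + c];
      ss += (uint64_t)(v * v); /* 32767^2 still fits in int32 */
    }
  return ss;
}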
vpx_convolve8_avg_horiz_msa.c
23 v8i16 filt, res0, res1; in common_hz_8t_and_aver_dst_4x4_msa() local
39 filt0, filt1, filt2, filt3, res0, res1); in common_hz_8t_and_aver_dst_4x4_msa()
42 SRARI_H2_SH(res0, res1, FILTER_BITS); in common_hz_8t_and_aver_dst_4x4_msa()
43 SAT_SH2_SH(res0, res1, 7); in common_hz_8t_and_aver_dst_4x4_msa()
44 res = PCKEV_XORI128_UB(res0, res1); in common_hz_8t_and_aver_dst_4x4_msa()
55 v16u8 mask0, mask1, mask2, mask3, res0, res1, res2, res3; in common_hz_8t_and_aver_dst_4x8_msa() local
85 PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, res0, res1, res2, in common_hz_8t_and_aver_dst_4x8_msa()
87 ILVR_D2_UB(res1, res0, res3, res2, res0, res2); in common_hz_8t_and_aver_dst_4x8_msa()
88 XORI_B2_128_UB(res0, res2); in common_hz_8t_and_aver_dst_4x8_msa()
89 AVER_UB2_UB(res0, dst0, res2, dst1, res0, res2); in common_hz_8t_and_aver_dst_4x8_msa()
[all …]
/external/llvm-project/libc/src/math/
sincosf_utils.h
122 uint64_t n, res0, res1, res2; in reduce_large() local
127 res0 = xi * arr[0]; in reduce_large()
130 res0 = (res2 >> 32) | (res0 << 32); in reduce_large()
131 res0 += res1; in reduce_large()
133 n = (res0 + (1ULL << 61)) >> 62; in reduce_large()
134 res0 -= n << 62; in reduce_large()
135 double x = (int64_t)res0; in reduce_large()
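reduce_large() (this copy and the near-identical one in arm-optimized-routines below) multiplies the float's mantissa by precomputed 32-bit chunks of a reciprocal-of-pi table, keeps the middle 64 bits of the 96-bit product, and splits off the top two bits as the quadrant n. A commented sketch of that arithmetic; the chunk indexing and the final scaling are simplifications of the real table layout:

#include <stdint.h>

/* xi: shifted mantissa of |x|; arr: three 32-bit table chunks. */
static double reduce_large_sketch(uint32_t xi, const uint32_t *arr,
                                  int *np) {
  uint64_t n, res0, res1, res2;

  /* 32x32 -> 64-bit partial products against the table chunks. */
  res0 = (uint64_t)xi * arr[0];
  res1 = (uint64_t)xi * arr[1];
  res2 = (uint64_t)xi * arr[2];
  /* Keep the middle 64 bits of the 96-bit product: the low half of
   * the high product stitched to the high half of the low product,
   * plus the full middle product. */
  res0 = (res2 >> 32) | (res0 << 32);
  res0 += res1;

  /* Round off the top two bits as the quadrant; what remains is a
   * signed fixed-point fraction of the reduction constant. */
  n = (res0 + (1ULL << 61)) >> 62;
  res0 -= n << 62;

  *np = (int)n;
  return (double)(int64_t)res0; /* caller rescales to radians */
}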
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_sse2.h
70 int res0, res1; in check_epi16_overflow_x8() local
71 res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); in check_epi16_overflow_x8()
73 return res0 + res1; in check_epi16_overflow_x8()
81 int res0, res1; in check_epi16_overflow_x12() local
82 res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); in check_epi16_overflow_x12()
84 if (!res0) res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11); in check_epi16_overflow_x12()
85 return res0 + res1; in check_epi16_overflow_x12()
95 int res0, res1; in check_epi16_overflow_x16() local
96 res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); in check_epi16_overflow_x16()
98 if (!res0) { in check_epi16_overflow_x16()
[all …]
highbd_convolve_avx2.c
342 __m256i signal[8], res0, res1; in vpx_highbd_filter_block1d8_h8_avx2() local
351 filter_8x1_pixels(signal, ff, &res0); in vpx_highbd_filter_block1d8_h8_avx2()
353 store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch); in vpx_highbd_filter_block1d8_h8_avx2()
361 filter_8x1_pixels(signal, ff, &res0); in vpx_highbd_filter_block1d8_h8_avx2()
362 store_8x1_pixels(&res0, &max, dst_ptr); in vpx_highbd_filter_block1d8_h8_avx2()
369 __m256i signal[8], res0, res1; in vpx_highbd_filter_block1d16_h8_avx2() local
378 filter_8x1_pixels(signal, ff, &res0); in vpx_highbd_filter_block1d16_h8_avx2()
380 store_16x1_pixels(&res0, &res1, &max, dst_ptr); in vpx_highbd_filter_block1d16_h8_avx2()
462 __m256i signal[2], res0, res1; in vpx_highbd_filter_block1d8_h2_avx2() local
471 filter_16_2t_pixels(signal, &ff, &res0, &res1); in vpx_highbd_filter_block1d8_h2_avx2()
[all …]
/external/llvm-project/llvm/test/CodeGen/X86/
vector-shuffle-combining-avx512f.ll
147 …%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5,…
148 …i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 -1)
189 …%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5,…
190 …i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 %m)
199 …%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2,…
200 ret <8 x double> %res0
214 …%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2,…
215 ret <8 x double> %res0
243 …%res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 0, i64 0, i64 2,…
244 ret <8 x double> %res0
[all …]
vector-shuffle-combining-xop.ll
20 …%res0 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a1, <2 x double> %a0, <2 x i64> <…
21 …%res1 = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %res0, <2 x double> undef, <2 x i6…
30 …%res0 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a1, <4 x double> %a0, <4 x i6…
31 …%res1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %res0, <4 x double> undef, <4 …
40 …%res0 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 undef, i32 7, i32 …
41 …%res1 = shufflevector <4 x double> %res0, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 7, i…
50 …%res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a1, <4 x float> %a0, <4 x i32> <i32…
51 …%res1 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %res0, <4 x float> undef, <4 x i32> …
62 …%res0 = call <4 x float> @llvm.x86.xop.vpermil2ps(<4 x float> %a0, <4 x float> %a1, <4 x i32> <i32…
63 …%res1 = shufflevector <4 x float> %res0, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 7, i32…
[all …]
vector-shuffle-combining-avx512vbmi.ll
25 …%res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8…
26 … 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 -1)
47 …%res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8…
48 … 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 %m)
57 …%res0 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> <i8 15, i8…
58 …6 x i8> %res0, <16 x i8> <i8 0, i8 15, i8 1, i8 14, i8 2, i8 13, i8 3, i8 12, i8 4, i8 11, i8 5, i…
68 …%res0 = shufflevector <32 x i8> %x0, <32 x i8> %x1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 …
69 …res0, <32 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8…
79 …%res0 = shufflevector <64 x i8> %x0, <64 x i8> %x1, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 …
80 …res0, <64 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8…
[all …]
vector-shuffle-combining-avx512bwvl.ll
12 …%res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 1…
13 …, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
34 …%res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 1…
35 …, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 %m)
45 …%res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> <i16 15…
46 …res0, <16 x i16> <i16 0, i16 15, i16 1, i16 14, i16 2, i16 13, i16 3, i16 12, i16 4, i16 11, i16 5…
56 …%res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %x0, <16 x i16> <i16 0,…
57 …16 23, i16 10, i16 25, i16 12, i16 27, i16 14, i16 29>, <16 x i16> %res0, <16 x i16> %res0, i16 -1)
66 …%res0 = call <16 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.256(<16 x i16> %a0, <16 x i16> <i16 20…
67 ret <16 x i16> %res0
[all …]
merge-consecutive-loads-512.ll
100 %res0 = insertelement <8 x double> undef, double %val0, i32 0
101 %res1 = insertelement <8 x double> %res0, double %val1, i32 1
121 %res0 = insertelement <8 x double> undef, double %val0, i32 0
122 %res1 = insertelement <8 x double> %res0, double %val1, i32 1
151 %res0 = insertelement <8 x double> undef, double %val0, i32 0
152 %res2 = insertelement <8 x double> %res0, double %val2, i32 2
199 %res0 = insertelement <8 x i64> undef, i64 %val0, i32 0
200 %res1 = insertelement <8 x i64> %res0, i64 %val1, i32 1
230 %res0 = insertelement <8 x i64> undef, i64 %val0, i32 0
231 %res2 = insertelement <8 x i64> %res0, i64 %val2, i32 2
[all …]
merge-consecutive-loads-256.ll
64 %res0 = insertelement <4 x double> undef, double %val0, i32 0
65 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
84 %res0 = insertelement <4 x double> undef, double %val0, i32 0
85 %res1 = insertelement <4 x double> %res0, double 0.0, i32 1
104 %res0 = insertelement <4 x double> undef, double %val0, i32 0
105 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
124 %res0 = insertelement <4 x double> zeroinitializer, double %val0, i32 0
125 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
148 %res0 = insertelement <4 x double> undef, double %val0, i32 0
149 %res1 = insertelement <4 x double> %res0, double %val1, i32 1
[all …]
/external/arm-optimized-routines/math/
sincosf.h
137 uint64_t n, res0, res1, res2; in reduce_large() local
142 res0 = xi * arr[0]; in reduce_large()
145 res0 = (res2 >> 32) | (res0 << 32); in reduce_large()
146 res0 += res1; in reduce_large()
148 n = (res0 + (1ULL << 61)) >> 62; in reduce_large()
149 res0 -= n << 62; in reduce_large()
150 double x = (int64_t)res0; in reduce_large()
/external/libaom/libaom/av1/common/arm/
wiener_convolve_neon.c
79 int16x8_t res0, res1, res2, res3; in av1_wiener_convolve_add_src_neon() local
118 res0 = vreinterpretq_s16_u16(vaddl_u8(t0, t6)); in av1_wiener_convolve_add_src_neon()
122 res4 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp, in av1_wiener_convolve_add_src_neon()
125 res0 = vreinterpretq_s16_u16(vaddl_u8(t1, t7)); in av1_wiener_convolve_add_src_neon()
129 res5 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp, in av1_wiener_convolve_add_src_neon()
132 res0 = vreinterpretq_s16_u16(vaddl_u8(t2, t8)); in av1_wiener_convolve_add_src_neon()
136 res6 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp, in av1_wiener_convolve_add_src_neon()
139 res0 = vreinterpretq_s16_u16(vaddl_u8(t3, t9)); in av1_wiener_convolve_add_src_neon()
143 res7 = wiener_convolve8_horiz_8x8(res0, res1, res2, res3, filter_x_tmp, in av1_wiener_convolve_add_src_neon()
146 res0 = vreinterpretq_s16_u16(vaddl_u8(t4, t10)); in av1_wiener_convolve_add_src_neon()
[all …]
/external/rust/crates/aho-corasick/src/packed/teddy/
runtime.rs
632 let (res0, res1) = members2m128(chunk, self.mask1, self.mask2); in candidate()
633 let res0prev0 = _mm_alignr_epi8(res0, *prev0, 15); in candidate()
694 let (res0, res1) = members2m256(chunk, self.mask1, self.mask2); in candidate()
695 let res0prev0 = alignr256_15(res0, *prev0); in candidate()
697 *prev0 = res0; in candidate()
760 let (res0, res1) = members2m256(chunk, self.mask1, self.mask2); in candidate()
761 let res0prev0 = _mm256_alignr_epi8(res0, *prev0, 15); in candidate()
763 *prev0 = res0; in candidate()
827 let (res0, res1, res2) = in candidate()
829 let res0prev0 = _mm_alignr_epi8(res0, *prev0, 14); in candidate()
[all …]
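Teddy keeps the previous chunk's per-byte fingerprint matches in prev0 and uses alignr to shift its trailing byte(s) in front of the current chunk's result, so fingerprints that straddle a 16-byte boundary still line up. A C sketch of that stitching step (the Rust code above does the same through core::arch intrinsics):

#include <tmmintrin.h> /* SSSE3 */

/* alignr with shift 15 yields: the last byte of *prev0 followed by
 * the first 15 bytes of res0, i.e. res0 shifted back one position
 * across the chunk boundary. */
static __m128i stitch_prev(__m128i res0, __m128i *prev0) {
  __m128i res0prev0 = _mm_alignr_epi8(res0, *prev0, 15);
  *prev0 = res0; /* becomes "previous" for the next chunk */
  return res0prev0;
}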
