
Searched refs:_mm256_and_si256 (Results 1 – 25 of 74) sorted by relevance

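_mm256_and_si256 is the AVX2 intrinsic for a bitwise AND of two 256-bit integer vectors (it compiles to VPAND), and every hit below uses it as a mask. A minimal stand-alone C example, not taken from any of the listed files:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      /* AND two 256-bit vectors bit for bit; the lane width does not matter. */
      const __m256i a = _mm256_set1_epi32(0x0F0F0F0F);
      const __m256i b = _mm256_set1_epi32(0x00FF00FF);
      const __m256i c = _mm256_and_si256(a, b);      /* each 32-bit lane is 0x000F000F */

      int out[8];
      _mm256_storeu_si256((__m256i *)out, c);
      printf("%08x\n", (unsigned)out[0]);            /* prints 000f000f */
      return 0;
    }

Compile with -mavx2 (or an equivalent AVX2-enabling flag).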

/external/rust/crates/aho-corasick/src/packed/teddy/
runtime.rs
696 let res = _mm256_and_si256(res0prev0, res1); in candidate()
762 let res = _mm256_and_si256(res0prev0, res1); in candidate()
902 _mm256_and_si256(_mm256_and_si256(res0prev0, res1prev1), res2); in candidate()
975 _mm256_and_si256(_mm256_and_si256(res0prev0, res1prev1), res2); in candidate()
1076 let hlo = _mm256_and_si256(chunk, lomask); in members1m256()
1077 let hhi = _mm256_and_si256(_mm256_srli_epi16(chunk, 4), lomask); in members1m256()
1078 _mm256_and_si256( in members1m256()
1127 let hlo = _mm256_and_si256(chunk, lomask); in members2m256()
1128 let hhi = _mm256_and_si256(_mm256_srli_epi16(chunk, 4), lomask); in members2m256()
1129 let res0 = _mm256_and_si256( in members2m256()
[all …]
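The runtime.rs hits are Rust, but the members1m256/members2m256 lines show the usual shuffle-matcher idiom: split each byte into low and high nibbles with a 0x0F mask, look both nibbles up, and AND the results. A C sketch of that pattern; lomask appears in the listing above, while lo_table and hi_table are placeholder names:

    #include <immintrin.h>

    /* A byte "survives" only if both of its nibble lookups agree. */
    static __m256i classify_bytes(__m256i chunk, __m256i lo_table, __m256i hi_table) {
      const __m256i lomask = _mm256_set1_epi8(0x0F);
      const __m256i hlo = _mm256_and_si256(chunk, lomask);                        /* low nibble of each byte */
      const __m256i hhi = _mm256_and_si256(_mm256_srli_epi16(chunk, 4), lomask);  /* high nibble of each byte */
      return _mm256_and_si256(_mm256_shuffle_epi8(lo_table, hlo),
                              _mm256_shuffle_epi8(hi_table, hhi));
    }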
/external/XNNPACK/src/qs8-vaddc/gen/
minmax-avx2-mul32-ld64-x32.c
48 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32()
49 …const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32()
50 …const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32()
51 …const __m256i vremOPQRSTUV = _mm256_add_epi32(_mm256_and_si256(vaccOPQRSTUV, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32()
78 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32()
minmax-avx2-mul32-ld64-x24.c
46 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24()
47 …const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24()
48 …const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24()
74 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24()
minmax-avx2-mul32-ld64-x16.c
44 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16()
45 …const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16()
66 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16()
minmax-avx2-mul32-ld64-x8.c
42 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8()
61 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8()
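The truncated vrem lines above AND the 32-bit accumulator with a remainder mask ahead of the final requantization shift. A hedged C sketch of that rounding right-shift idiom; the function name and threshold formula are assumptions for illustration, not XNNPACK's exact parameter layout:

    #include <immintrin.h>

    /* Arithmetic right shift with rounding: keep the bits the shift would
       discard (vacc & remainder_mask), bias negative lanes by -1, and round
       up whenever the kept remainder exceeds the threshold. */
    static __m256i rounding_sra_epi32(__m256i vacc, int shift) {
      const __m256i vremainder_mask = _mm256_set1_epi32((1 << shift) - 1);
      const __m256i vthreshold = _mm256_set1_epi32(((1 << shift) - 1) >> 1);
      const __m256i vrem = _mm256_add_epi32(
          _mm256_and_si256(vacc, vremainder_mask),
          _mm256_cmpgt_epi32(_mm256_setzero_si256(), vacc));   /* -1 in negative lanes */
      return _mm256_sub_epi32(
          _mm256_sra_epi32(vacc, _mm_cvtsi32_si128(shift)),
          _mm256_cmpgt_epi32(vrem, vthreshold));               /* subtracting -1 rounds up */
    }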
/external/XNNPACK/src/f32-velu/gen/
velu-avx2-rr1-lut16-p3-gather-x72.c
74 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
76 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
78 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
80 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
82 const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
84 const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
86 const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
88 const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
90 const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
226 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72()
[all …]
velu-avx2-rr1-lut16-p3-gather-x80.c
77 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
79 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
81 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
83 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
85 const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
87 const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
89 const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
91 const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
93 const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
95 const __m256i vidx9 = _mm256_and_si256(_mm256_castps_si256(vn9), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80()
[all …]
velu-avx2-rr1-lut16-p3-gather-x64.c
71 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
73 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
75 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
77 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
79 const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
81 const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
83 const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
85 const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
208 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
241 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64()
velu-avx2-rr1-lut16-p3-gather-x48.c
65 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
67 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
69 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
71 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
73 const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
75 const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
172 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
205 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48()
velu-avx2-rr1-lut16-p3-gather-x56.c
68 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
70 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
72 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
74 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
76 const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
78 const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
80 const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
190 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
223 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56()
velu-avx2-rr1-lut16-p3-gather-x40.c
62 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
64 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
66 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
68 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
70 const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
154 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
187 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40()
velu-avx2-rr1-lut16-p3-gather-x32.c
59 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32()
61 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32()
63 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32()
65 const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32()
136 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32()
169 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32()
velu-avx2-rr1-lut16-p3-gather-x24.c
56 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
58 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
60 const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
118 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
151 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24()
velu-avx2-rr1-lut16-p3-gather-x16.c
53 const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
55 const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
100 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
133 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16()
velu-avx2-rr1-lut16-p3-gather-x8.c
49 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8()
82 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8()
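All of the vidx lines in this group mask the bit pattern of a float (vn, the value the ELU kernel has already prepared with its rounding magic) down to its low 4 bits and use the result to index a 16-entry lookup table. A C sketch of that index-mask-plus-gather step; gather_lut16 and the bare float table are illustrative stand-ins for the kernel's own table handling:

    #include <immintrin.h>

    /* The low 4 bits of each float's bit pattern select one of 16 table entries. */
    static __m256 gather_lut16(__m256 vn, const float table[16]) {
      const __m256i vindex_mask = _mm256_set1_epi32(0xF);
      const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
      return _mm256_i32gather_ps(table, vidx, 4 /* byte stride between entries */);
    }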
/external/XNNPACK/src/qs8-vadd/gen/
minmax-avx2-mul32-ld64-x32.c
57 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32()
58 …const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32()
59 …const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32()
60 …const __m256i vremOPQRSTUV = _mm256_add_epi32(_mm256_and_si256(vaccOPQRSTUV, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32()
91 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32()
minmax-avx2-mul32-ld64-x24.c
53 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24()
54 …const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24()
55 …const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24()
85 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24()
minmax-avx2-mul32-ld64-x16.c
49 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16()
50 …const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16()
75 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16()
minmax-avx2-mul32-ld64-x8.c
45 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8()
67 …const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm… in xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8()
/external/libaom/libaom/aom_dsp/x86/
highbd_quantize_intrin_avx2.c
59 prod_lo = _mm256_and_si256(prod_lo, mask); in mm256_mul_shift_epi32()
86 q = _mm256_and_si256(q, flag2); in quantize()
87 dq = _mm256_and_si256(dq, flag2); in quantize()
103 cur_eob = _mm256_and_si256(cur_eob, nz); in quantize()
highbd_adaptive_quantize_avx2.c
41 temp_mask = _mm256_and_si256(*cmp_mask, iscan); in highbd_update_mask1_avx2()
176 qcoeff0 = _mm256_and_si256(qcoeff0, temp0); in aom_highbd_quantize_b_adaptive_avx2()
177 qcoeff1 = _mm256_and_si256(qcoeff1, temp1); in aom_highbd_quantize_b_adaptive_avx2()
209 qcoeff0 = _mm256_and_si256(qcoeff0, temp0); in aom_highbd_quantize_b_adaptive_avx2()
210 qcoeff1 = _mm256_and_si256(qcoeff1, temp1); in aom_highbd_quantize_b_adaptive_avx2()
358 qcoeff0 = _mm256_and_si256(qcoeff0, temp0); in aom_highbd_quantize_b_32x32_adaptive_avx2()
359 qcoeff1 = _mm256_and_si256(qcoeff1, temp1); in aom_highbd_quantize_b_32x32_adaptive_avx2()
393 qcoeff0 = _mm256_and_si256(qcoeff0, temp0); in aom_highbd_quantize_b_32x32_adaptive_avx2()
394 qcoeff1 = _mm256_and_si256(qcoeff1, temp1); in aom_highbd_quantize_b_32x32_adaptive_avx2()
adaptive_quantize_avx2.c
51 temp_mask = _mm256_and_si256(*cmp_mask, iscan); in update_mask1_avx2()
158 qcoeff = _mm256_and_si256(qcoeff, temp0); in aom_quantize_b_adaptive_avx2()
183 qcoeff = _mm256_and_si256(qcoeff, temp0); in aom_quantize_b_adaptive_avx2()
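The libaom hits use _mm256_and_si256 to zero lanes selected by a comparison mask (qcoeff &= temp, cur_eob &= nz). A small C sketch of that zero-below-threshold pattern; the function and argument names are illustrative, not libaom's:

    #include <immintrin.h>

    /* _mm256_cmpgt_epi32 yields all-ones or all-zeros per lane, so a single
       AND either passes the quantized value through or clears it. */
    static __m256i zero_below_threshold(__m256i qcoeff, __m256i abs_coeff, __m256i threshold) {
      const __m256i keep_mask = _mm256_cmpgt_epi32(abs_coeff, threshold);
      return _mm256_and_si256(qcoeff, keep_mask);
    }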
/external/XNNPACK/src/f32-velu/
avx2-rr1-lut16-p3-gather.c.in
55 const __m256i vidx${N} = _mm256_and_si256(_mm256_castps_si256(vn${N}), vindex_mask);
96 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
129 const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
/external/gemmlowp/fixedpoint/
fixedpoint_avx.h
54 return _mm256_and_si256(a, b);
59 return to_int16x16_m256i(_mm256_and_si256(a.v, b.v));
166 _mm256_or_si256(_mm256_and_si256(if_mask.v, then_val.v),
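The line-166 hit is truncated, but the visible _mm256_or_si256(_mm256_and_si256(if_mask, then_val), …) is the start of the standard branchless select, presumably completed with an ANDNOT of the same mask. A C sketch of that idiom under that assumption:

    #include <immintrin.h>

    /* Where mask lanes are all-ones take then_val, elsewhere take else_val. */
    static __m256i select_using_mask(__m256i mask, __m256i then_val, __m256i else_val) {
      return _mm256_or_si256(_mm256_and_si256(mask, then_val),
                             _mm256_andnot_si256(mask, else_val));   /* (~mask) & else_val */
    }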
/external/libaom/libaom/aom_dsp/simd/
v256_intrinsics_x86.h
493 SIMD_INLINE v256 v256_and(v256 a, v256 b) { return _mm256_and_si256(a, b); } in v256_and()
529 _mm256_and_si256(_mm256_xor_si256(a, b), v256_dup_8(1))); in v256_rdavg_u8()
535 _mm256_and_si256(_mm256_xor_si256(a, b), v256_dup_16(1))); in v256_rdavg_u16()
599 return _mm256_and_si256(_mm256_set1_epi8((uint8_t)(0xff << c)), in v256_shl_8()
604 return _mm256_and_si256(_mm256_set1_epi8((char)(0xff >> c)), in v256_shr_u8()
682 _mm256_and_si256(_mm256_set1_epi8((uint8_t)(0xff << (c))), \
685 _mm256_and_si256(_mm256_set1_epi8(0xff >> (c)), _mm256_srli_epi16(a, c))
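The v256_shl_8/v256_shr_u8 hits work around the lack of 8-bit shifts in AVX2: shift 16-bit lanes, then AND with a repeated byte mask to discard the bits that leaked across byte boundaries. A C sketch of the same trick with assumed macro names; the shift count must be a compile-time constant for the immediate-form intrinsics:

    #include <immintrin.h>

    /* Emulated per-byte shifts for 32 unsigned bytes. */
    #define shl_u8x32(a, c) \
      _mm256_and_si256(_mm256_set1_epi8((char)(0xff << (c))), _mm256_slli_epi16((a), (c)))
    #define shr_u8x32(a, c) \
      _mm256_and_si256(_mm256_set1_epi8((char)(0xff >> (c))), _mm256_srli_epi16((a), (c)))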
