Searched refs:__u (Results 1 – 25 of 42) sorted by relevance

/external/clang/test/CodeGen/
avx512vlbw-builtins.c
15 __mmask32 test_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { in test_mm256_mask_cmpeq_epi8_mask() argument
19 return (__mmask32)_mm256_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm256_mask_cmpeq_epi8_mask()
28 __mmask16 test_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epi8_mask() argument
32 return (__mmask16)_mm_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm_mask_cmpeq_epi8_mask()
41 __mmask16 test_mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { in test_mm256_mask_cmpeq_epi16_mask() argument
45 return (__mmask16)_mm256_mask_cmpeq_epi16_mask(__u, __a, __b); in test_mm256_mask_cmpeq_epi16_mask()
54 __mmask8 test_mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epi16_mask() argument
58 return (__mmask8)_mm_mask_cmpeq_epi16_mask(__u, __a, __b); in test_mm_mask_cmpeq_epi16_mask()
67 __mmask32 test_mm256_mask_cmpgt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { in test_mm256_mask_cmpgt_epi8_mask() argument
71 return (__mmask32)_mm256_mask_cmpgt_epi8_mask(__u, __a, __b); in test_mm256_mask_cmpgt_epi8_mask()
[all …]
avx512bw-builtins.c
15 __mmask64 test_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi8_mask() argument
19 return (__mmask64)_mm512_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi8_mask()
28 __mmask32 test_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi16_mask() argument
32 return (__mmask32)_mm512_mask_cmpeq_epi16_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi16_mask()
41 __mmask64 test_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpgt_epi8_mask() argument
45 return (__mmask64)_mm512_mask_cmpgt_epi8_mask(__u, __a, __b); in test_mm512_mask_cmpgt_epi8_mask()
54 __mmask32 test_mm512_mask_cmpgt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpgt_epi16_mask() argument
58 return (__mmask32)_mm512_mask_cmpgt_epi16_mask(__u, __a, __b); in test_mm512_mask_cmpgt_epi16_mask()
67 __mmask64 test_mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epu8_mask() argument
71 return (__mmask64)_mm512_mask_cmpeq_epu8_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epu8_mask()
[all …]
union.c
19 unsigned int __u; in f2() member
20 }__u; in f2() local
21 return (int)(__u.__u >> 31); in f2()
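
The union.c hit exercises sign-bit extraction through a local union whose type and variable are both named __u; the >> 31 suggests the other member is a 32-bit float. A minimal standalone C++ sketch of the same idea (the function name sign_bit is made up here, and memcpy stands in for the union read, which is only a compiler extension in C++):

    #include <cstdint>
    #include <cstring>

    // Reinterpret a float's bits and return its sign bit, mirroring
    // "return (int)(__u.__u >> 31);" from the test above.
    int sign_bit(float f) {
        std::uint32_t bits;
        std::memcpy(&bits, &f, sizeof bits);  // stands in for the union member read
        return static_cast<int>(bits >> 31);
    }
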
avx512vl-builtins.c
15 __mmask8 test_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epu32_mask() argument
19 return (__mmask8)_mm_mask_cmpeq_epu32_mask(__u, __a, __b); in test_mm_mask_cmpeq_epu32_mask()
29 __mmask8 test_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epu64_mask() argument
33 return (__mmask8)_mm_mask_cmpeq_epu64_mask(__u, __a, __b); in test_mm_mask_cmpeq_epu64_mask()
42 __mmask8 test_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpge_epi32_mask() argument
46 return (__mmask8)_mm_mask_cmpge_epi32_mask(__u, __a, __b); in test_mm_mask_cmpge_epi32_mask()
55 __mmask8 test_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpge_epi64_mask() argument
59 return (__mmask8)_mm_mask_cmpge_epi64_mask(__u, __a, __b); in test_mm_mask_cmpge_epi64_mask()
68 __mmask8 test_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { in test_mm256_mask_cmpge_epi32_mask() argument
72 return (__mmask8)_mm256_mask_cmpge_epi32_mask(__u, __a, __b); in test_mm256_mask_cmpge_epi32_mask()
[all …]
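
In all of these builtin tests, __u is the incoming write-mask that gets ANDed into the comparison result. A small usage sketch for one of the intrinsics they cover (requires AVX512VL and AVX512BW, e.g. clang -mavx512vl -mavx512bw; the function name is illustrative):

    #include <immintrin.h>

    // Compare 32 byte lanes for equality, but only where 'enabled' has a 1 bit;
    // lanes masked off by 'enabled' come back as 0 in the result mask.
    __mmask32 masked_byte_eq(__m256i a, __m256i b, __mmask32 enabled) {
        return _mm256_mask_cmpeq_epi8_mask(enabled, a, b);
    }
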
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
movmsk.ll
12 %__u.i = alloca %0, align 8
14 %1 = bitcast %0* %__u.i to i8*
16 %__f.i = getelementptr inbounds %0* %__u.i, i64 0, i32 0
30 %__u.i = alloca %0, align 8
33 %1 = bitcast %0* %__u.i to i8*
35 %__f.i = getelementptr inbounds %0* %__u.i, i64 0, i32 0
49 %__u.i = alloca %union.anon, align 4
51 %1 = bitcast %union.anon* %__u.i to i8*
53 %__f.i = getelementptr inbounds %union.anon* %__u.i, i64 0, i32 0
66 %__u.i = alloca %union.anon, align 4
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
movmsk.ll
17 %__u.i = alloca %0, align 8
19 %1 = bitcast %0* %__u.i to i8*
21 %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
40 %__u.i = alloca %0, align 8
43 %1 = bitcast %0* %__u.i to i8*
45 %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
63 %__u.i = alloca %union.anon, align 4
65 %1 = bitcast %union.anon* %__u.i to i8*
67 %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
85 %__u.i = alloca %union.anon, align 4
[all …]
avx512vl-vec-masked-cmp.ll
54 define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i6…
75 %3 = bitcast i16 %__u to <16 x i1>
82 define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 …
104 %3 = bitcast i16 %__u to <16 x i1>
163 define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i6…
184 %3 = bitcast i16 %__u to <16 x i1>
191 define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 …
213 %3 = bitcast i16 %__u to <16 x i1>
284 define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i6…
315 %3 = bitcast i32 %__u to <32 x i1>
[all …]
/external/llvm/test/CodeGen/X86/
movmsk.ll
17 %__u.i = alloca %0, align 8
19 %1 = bitcast %0* %__u.i to i8*
21 %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
40 %__u.i = alloca %0, align 8
43 %1 = bitcast %0* %__u.i to i8*
45 %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
63 %__u.i = alloca %union.anon, align 4
65 %1 = bitcast %union.anon* %__u.i to i8*
67 %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
85 %__u.i = alloca %union.anon, align 4
[all …]
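
The three copies of movmsk.ll (one per LLVM snapshot in this tree) appear to test that sign-bit extraction written through a small union is folded into a MOVMSK-style instruction. Roughly the C++ that produces such IR (illustrative; the portable memcpy form is shown instead of the union, and the exact lowering depends on the optimizer):

    #include <cstdint>
    #include <cstring>

    // Copy a double's bit pattern and shift out everything but the sign bit.
    // Code along these lines is the likely origin of the IR in those tests.
    std::uint32_t double_sign_bit(double d) {
        std::uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return static_cast<std::uint32_t>(bits >> 63);
    }
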
/external/clang/lib/Headers/
avx512vlbwintrin.h
48 _mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epi8_mask() argument
50 __u); in _mm_mask_cmpeq_epi8_mask()
60 _mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epu8_mask() argument
62 __u); in _mm_mask_cmpeq_epu8_mask()
72 _mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { in _mm256_mask_cmpeq_epi8_mask() argument
74 __u); in _mm256_mask_cmpeq_epi8_mask()
84 _mm256_mask_cmpeq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { in _mm256_mask_cmpeq_epu8_mask() argument
86 __u); in _mm256_mask_cmpeq_epu8_mask()
96 _mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epi16_mask() argument
98 __u); in _mm_mask_cmpeq_epi16_mask()
[all …]
emmintrin.h
484 double __u; in _mm_load1_pd() member
486 double __u = ((struct __mm_load1_pd_struct*)__dp)->__u; in _mm_load1_pd() local
487 return (__m128d){ __u, __u }; in _mm_load1_pd()
495 __m128d __u = *(__m128d*)__dp; in _mm_loadr_pd() local
496 return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0); in _mm_loadr_pd()
514 long long __u = ((struct __loadu_si64*)__a)->__v; in _mm_loadu_si64() local
515 return (__m128i){__u, 0L}; in _mm_loadu_si64()
522 double __u; in _mm_load_sd() member
524 double __u = ((struct __mm_load_sd_struct*)__dp)->__u; in _mm_load_sd() local
525 return (__m128d){ __u, 0 }; in _mm_load_sd()
[all …]
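
The emmintrin.h hits are the SSE2 scalar and replicating loads, each of which reads memory through a one-field packed struct whose member is named __u. Typical calls (function name illustrative; assumes p points at a readable double):

    #include <emmintrin.h>

    // _mm_load_sd fills only the low lane; _mm_load1_pd broadcasts the value.
    __m128d scalar_loads(const double *p) {
        __m128d lo   = _mm_load_sd(p);   // { p[0], 0.0 }
        __m128d both = _mm_load1_pd(p);  // { p[0], p[0] }
        return _mm_add_pd(lo, both);
    }
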
avx512bwintrin.h
66 _mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epi8_mask() argument
68 __u); in _mm512_mask_cmpeq_epi8_mask()
78 _mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epu8_mask() argument
80 __u); in _mm512_mask_cmpeq_epu8_mask()
90 _mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epi16_mask() argument
92 __u); in _mm512_mask_cmpeq_epi16_mask()
102 _mm512_mask_cmpeq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epu16_mask() argument
104 __u); in _mm512_mask_cmpeq_epu16_mask()
114 _mm512_mask_cmpge_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpge_epi8_mask() argument
116 __u); in _mm512_mask_cmpge_epi8_mask()
[all …]
xmmintrin.h
1581 __mm_loadh_pi_v2f32 __u; in _mm_loadh_pi() member
1583 __mm_loadh_pi_v2f32 __b = ((struct __mm_loadh_pi_struct*)__p)->__u; in _mm_loadh_pi()
1608 __mm_loadl_pi_v2f32 __u; in _mm_loadl_pi() member
1610 __mm_loadl_pi_v2f32 __b = ((struct __mm_loadl_pi_struct*)__p)->__u; in _mm_loadl_pi()
1634 float __u; in _mm_load_ss() member
1636 float __u = ((struct __mm_load_ss_struct*)__p)->__u; in _mm_load_ss() local
1637 return (__m128){ __u, 0, 0, 0 }; in _mm_load_ss()
1656 float __u; in _mm_load1_ps() member
1658 float __u = ((struct __mm_load1_ps_struct*)__p)->__u; in _mm_load1_ps() local
1659 return (__m128){ __u, __u, __u, __u }; in _mm_load1_ps()
[all …]
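
Same pattern in xmmintrin.h for SSE: _mm_loadh_pi/_mm_loadl_pi pull 64 bits through the packed struct and splice them into half of an existing vector, while _mm_load_ss/_mm_load1_ps are the scalar and broadcast forms. A short sketch of the half-loads (function name illustrative):

    #include <xmmintrin.h>

    // Replace the low two lanes of v from *lo and the high two lanes from *hi.
    __m128 splice_halves(__m128 v, const __m64 *lo, const __m64 *hi) {
        v = _mm_loadl_pi(v, lo);  // lanes 0-1
        v = _mm_loadh_pi(v, hi);  // lanes 2-3
        return v;
    }
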
/external/libcxx/include/experimental/
propagate_const
152 static _LIBCPP_CONSTEXPR element_type* __get_pointer(_Up* __u)
154 return __u;
158 static _LIBCPP_CONSTEXPR element_type* __get_pointer(_Up& __u)
160 return __get_pointer(__u.get());
164 static _LIBCPP_CONSTEXPR const element_type* __get_pointer(const _Up* __u)
166 return __u;
170 static _LIBCPP_CONSTEXPR const element_type* __get_pointer(const _Up& __u)
172 return __get_pointer(__u.get());
215 explicit _LIBCPP_CONSTEXPR propagate_const(_Up&& __u)
216 : __t_(std::forward<_Up>(__u))
[all …]
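
Here __u is the wrapped pointer-like object that propagate_const unwraps. A minimal usage sketch (Widget and Owner are made-up names; needs a standard library that ships <experimental/propagate_const>, as libc++ does per the hit above):

    #include <experimental/propagate_const>
    #include <memory>

    struct Widget {
        void draw() const {}
        void mutate() {}
    };

    class Owner {
        std::experimental::propagate_const<std::unique_ptr<Widget>> w_{
            std::make_unique<Widget>()};
    public:
        void inspect() const { w_->draw(); }    // const Owner sees const Widget*
        void touch()         { w_->mutate(); }  // non-const Owner sees Widget*
    };
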
/external/libcxx/include/
__hash_table
1007 __hash_table(const __hash_table& __u);
1008 __hash_table(const __hash_table& __u, const allocator_type& __a);
1010 __hash_table(__hash_table&& __u)
1017 __hash_table(__hash_table&& __u, const allocator_type& __a);
1021 __hash_table& operator=(const __hash_table& __u);
1024 __hash_table& operator=(__hash_table&& __u)
1262 void swap(__hash_table& __u)
1368 void __copy_assign_alloc(const __hash_table& __u)
1369 {__copy_assign_alloc(__u, integral_constant<bool,
1371 void __copy_assign_alloc(const __hash_table& __u, true_type);
[all …]
unordered_set
465 unordered_set(const unordered_set& __u);
466 unordered_set(const unordered_set& __u, const allocator_type& __a);
469 unordered_set(unordered_set&& __u)
471 unordered_set(unordered_set&& __u, const allocator_type& __a);
492 unordered_set& operator=(const unordered_set& __u)
494 __table_ = __u.__table_;
499 unordered_set& operator=(unordered_set&& __u)
663 void swap(unordered_set& __u)
665 {__table_.swap(__u.__table_);}
808 const unordered_set& __u)
[all …]
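
In the container headers, __u is consistently "the other container" in the copy/move constructors, assignment operators, and swap shown above. For example (illustrative):

    #include <unordered_set>
    #include <utility>

    void container_transfer() {
        std::unordered_set<int> a{1, 2, 3};
        std::unordered_set<int> b(a);             // unordered_set(const unordered_set& __u)
        std::unordered_set<int> c(std::move(a));  // unordered_set(unordered_set&& __u)
        b.swap(c);                                // swap(unordered_set& __u)
    }
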
__mutex_base
163 unique_lock(unique_lock&& __u) _NOEXCEPT
164 : __m_(__u.__m_), __owns_(__u.__owns_)
165 {__u.__m_ = nullptr; __u.__owns_ = false;}
167 unique_lock& operator=(unique_lock&& __u) _NOEXCEPT
171 __m_ = __u.__m_;
172 __owns_ = __u.__owns_;
173 __u.__m_ = nullptr;
174 __u.__owns_ = false;
191 void swap(unique_lock& __u) _NOEXCEPT
193 _VSTD::swap(__m_, __u.__m_);
[all …]
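
The __mutex_base hits show that moving a unique_lock hands over the mutex pointer and ownership flag and leaves the source disengaged (__u.__m_ = nullptr; __u.__owns_ = false). In practice that is what lets a lock be returned from a function (sketch; the global mutex is illustrative):

    #include <mutex>

    std::mutex m;

    std::unique_lock<std::mutex> acquire() {
        std::unique_lock<std::mutex> lk(m);
        return lk;  // moved out; the local ends up unowned, exactly per the code above
    }

    void use() {
        std::unique_lock<std::mutex> lk = acquire();
        // lk now owns m until it goes out of scope.
    }
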
unordered_map
929 unordered_map(const unordered_map& __u);
930 unordered_map(const unordered_map& __u, const allocator_type& __a);
933 unordered_map(unordered_map&& __u)
935 unordered_map(unordered_map&& __u, const allocator_type& __a);
969 unordered_map& operator=(const unordered_map& __u)
972 __table_ = __u.__table_;
974 if (this != &__u) {
976 __table_.hash_function() = __u.__table_.hash_function();
977 __table_.key_eq() = __u.__table_.key_eq();
978 __table_.max_load_factor() = __u.__table_.max_load_factor();
[all …]
shared_mutex
377 shared_lock(shared_lock&& __u) _NOEXCEPT
378 : __m_(__u.__m_),
379 __owns_(__u.__owns_)
381 __u.__m_ = nullptr;
382 __u.__owns_ = false;
386 shared_lock& operator=(shared_lock&& __u) _NOEXCEPT
392 __m_ = __u.__m_;
393 __owns_ = __u.__owns_;
394 __u.__m_ = nullptr;
395 __u.__owns_ = false;
[all …]
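
shared_lock has the same move-transfer shape as unique_lock above. The usual reader/writer pairing looks like this (sketch; names illustrative, C++17):

    #include <shared_mutex>

    std::shared_mutex rw;
    int shared_value = 0;

    int read_value() {
        std::shared_lock<std::shared_mutex> lk(rw);  // many concurrent readers
        return shared_value;
    }

    void write_value(int v) {
        std::unique_lock<std::shared_mutex> lk(rw);  // one exclusive writer
        shared_value = v;
    }
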
utility
1055 static _Size __hash_len_16(_Size __u, _Size __v)
1059 _Size __a = (__u ^ __v) * __mul;
1220 } __u;
1221 __u.__a = 0;
1222 __u.__t = __v;
1223 return __u.__a;
1238 } __u;
1239 __u.__t = __v;
1240 return __u.__a;
1259 } __u;
[all …]
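
These utility hits are libc++'s scalar hash support: __hash_len_16 mixes two words, and the union blocks zero a size_t-sized buffer, copy the value's bytes over it, and return the result. A portable sketch of that last step for a double on a 64-bit target (assumption stated in the static_assert; the function name is made up):

    #include <cstddef>
    #include <cstring>

    // Equivalent of "__u.__a = 0; __u.__t = __v; return __u.__a;" without the union.
    std::size_t double_bits_as_size_t(double v) {
        static_assert(sizeof(std::size_t) >= sizeof(double),
                      "sketch assumes a 64-bit size_t");
        std::size_t out = 0;              // __u.__a = 0
        std::memcpy(&out, &v, sizeof v);  // __u.__t = __v
        return out;                       // return __u.__a
    }
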
functional
525 auto operator()(_T1&& __t, _T2&& __u) const
526 _NOEXCEPT_(noexcept(_VSTD::forward<_T1>(__t) + _VSTD::forward<_T2>(__u)))
527 -> decltype (_VSTD::forward<_T1>(__t) + _VSTD::forward<_T2>(__u))
528 { return _VSTD::forward<_T1>(__t) + _VSTD::forward<_T2>(__u); }
552 auto operator()(_T1&& __t, _T2&& __u) const
553 _NOEXCEPT_(noexcept(_VSTD::forward<_T1>(__t) - _VSTD::forward<_T2>(__u)))
554 -> decltype (_VSTD::forward<_T1>(__t) - _VSTD::forward<_T2>(__u))
555 { return _VSTD::forward<_T1>(__t) - _VSTD::forward<_T2>(__u); }
579 auto operator()(_T1&& __t, _T2&& __u) const
580 _NOEXCEPT_(noexcept(_VSTD::forward<_T1>(__t) * _VSTD::forward<_T2>(__u)))
[all …]
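
The functional hits are the transparent ("diamond") operator functors, where __t and __u are just the two perfectly forwarded operands. Usage is simply (illustrative):

    #include <functional>
    #include <string>
    #include <utility>

    // std::plus<> deduces mixed operand types instead of forcing one T.
    std::string concat(std::string a, const char *b) {
        return std::plus<>{}(std::move(a), b);
    }
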
numeric
231 _Tp __init, _BinaryOp __b, _UnaryOp __u)
234 __init = __b(__init, __u(*__first));
366 _BinaryOp __b, _UnaryOp __u)
373 __init = __b(__init, __u(*__first));
384 _OutputIterator __result, _BinaryOp __b, _UnaryOp __u, _Tp __init)
387 __init = __b(__init, __u(*__first));
396 _OutputIterator __result, _BinaryOp __b, _UnaryOp __u)
399 typename std::iterator_traits<_InputIterator>::value_type __init = __u(*__first);
402 return _VSTD::transform_inclusive_scan(__first, __last, __result, __b, __u, __init);
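
In the numeric algorithms, __b is the binary reduction and __u the unary transform applied to each element first. The classic use is a sum of squares (sketch, C++17):

    #include <functional>
    #include <numeric>
    #include <vector>

    double sum_of_squares(const std::vector<double> &xs) {
        return std::transform_reduce(xs.begin(), xs.end(), 0.0,
                                     std::plus<>{},                   // __b
                                     [](double x) { return x * x; }); // __u
    }
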
/external/u-boot/include/linux/
compiler.h
279 union { typeof(x) __val; char __c[1]; } __u; \
281 __read_once_size(&(x), __u.__c, sizeof(x)); \
283 __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
284 __u.__val; \
296 union { typeof(x) __val; char __c[1]; } __u = \
298 __write_once_size(&(x), __u.__c, sizeof(x)); \
299 __u.__val; \
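
In u-boot's compiler.h, READ_ONCE/WRITE_ONCE stage the value in the __u union and perform the actual access through size-dispatched volatile reads/writes so the compiler cannot merge, tear, or refetch it. There is no drop-in standard C++ macro for that, but a relaxed atomic gives a comparable guarantee (rough analogue, not the macro itself):

    #include <atomic>

    // Single, untorn load/store with no ordering beyond the access itself --
    // roughly what READ_ONCE/WRITE_ONCE provide for word-sized objects.
    template <class T>
    T read_once(const std::atomic<T> &v) {
        return v.load(std::memory_order_relaxed);
    }

    template <class T>
    void write_once(std::atomic<T> &v, T x) {
        v.store(x, std::memory_order_relaxed);
    }
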
/external/libcxx/include/ext/
hash_set
257 hash_set(const hash_set& __u);
299 void swap(hash_set& __u) {__table_.swap(__u.__table_);}
380 const hash_set& __u)
381 : __table_(__u.__table_)
383 __table_.rehash(__u.bucket_count());
384 insert(__u.begin(), __u.end());
479 hash_multiset(const hash_multiset& __u);
520 void swap(hash_multiset& __u) {__table_.swap(__u.__table_);}
602 const hash_multiset& __u)
603 : __table_(__u.__table_)
[all …]
/external/swiftshader/third_party/LLVM/test/Transforms/DeadStoreElimination/
PartialStore.ll
30 %__u = alloca { [3 x i32] }
31 %tmp.1 = bitcast { [3 x i32] }* %__u to double*
33 %tmp.4 = getelementptr { [3 x i32] }* %__u, i32 0, i32 0, i32 1
/external/llvm/test/Transforms/DeadStoreElimination/
PartialStore.ll
30 %__u = alloca { [3 x i32] }
31 %tmp.1 = bitcast { [3 x i32] }* %__u to double*
33 %tmp.4 = getelementptr { [3 x i32] }, { [3 x i32] }* %__u, i32 0, i32 0, i32 1
