
Searched refs: __b (Results 1 – 25 of 97) sorted by relevance


/external/clang/lib/Headers/
altivec.h
40 vector signed char __b,
44 vector unsigned char __b,
48 vector bool char __b,
52 vector signed short __b,
56 vector unsigned short __b,
60 vector bool short __b,
63 static vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, vector pixel __b,
67 vector signed int __b,
71 vector unsigned int __b,
75 vector bool int __b,
[all …]
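
These vec_perm overloads all select bytes from the 32-byte concatenation of __a and __b, using the low five bits of each byte of the pattern vector. A minimal usage sketch, assuming a PowerPC target compiled with -maltivec (the interleave pattern is illustrative):

    #include <altivec.h>

    /* Interleave the low 8 bytes of a and b: pattern indices 0-15
       address a, 16-31 address b. */
    vector unsigned char interleave_lo(vector unsigned char a,
                                       vector unsigned char b) {
        const vector unsigned char pat =
            {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23};
        return vec_perm(a, b, pat);
    }
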
vecintrin.h
361 vec_perm(vector signed char __a, vector signed char __b, in vec_perm() argument
364 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
368 vec_perm(vector unsigned char __a, vector unsigned char __b, in vec_perm() argument
371 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
375 vec_perm(vector bool char __a, vector bool char __b, in vec_perm() argument
378 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
382 vec_perm(vector signed short __a, vector signed short __b, in vec_perm() argument
385 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
389 vec_perm(vector unsigned short __a, vector unsigned short __b, in vec_perm() argument
392 (vector unsigned char)__a, (vector unsigned char)__b, __c); in vec_perm()
[all …]
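
The SystemZ header implements each typed overload by casting to vector unsigned char and funneling into one underlying builtin, as the casts above show; the byte-select semantics match the AltiVec vec_perm. A byte-reversal sketch, assuming a z13 or later target compiled with -mzvector:

    #include <vecintrin.h>

    vector unsigned char reverse_bytes(vector unsigned char a) {
        const vector unsigned char rev =
            {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
        return vec_perm(a, a, rev);
    }
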
xmmintrin.h
43 _mm_add_ss(__m128 __a, __m128 __b) in _mm_add_ss() argument
45 __a[0] += __b[0]; in _mm_add_ss()
50 _mm_add_ps(__m128 __a, __m128 __b) in _mm_add_ps() argument
52 return __a + __b; in _mm_add_ps()
56 _mm_sub_ss(__m128 __a, __m128 __b) in _mm_sub_ss() argument
58 __a[0] -= __b[0]; in _mm_sub_ss()
63 _mm_sub_ps(__m128 __a, __m128 __b) in _mm_sub_ps() argument
65 return __a - __b; in _mm_sub_ps()
69 _mm_mul_ss(__m128 __a, __m128 __b) in _mm_mul_ss() argument
71 __a[0] *= __b[0]; in _mm_mul_ss()
[all …]
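
Note the scalar/packed split visible above: the _ss forms operate on lane 0 only and carry lanes 1-3 over from __a, while the _ps forms operate on all four lanes. A sketch, assuming an x86 target with SSE:

    #include <xmmintrin.h>

    void demo(void) {
        __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* lanes {1,2,3,4} */
        __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f); /* lanes {5,6,7,8} */
        __m128 s = _mm_add_ss(a, b); /* {6, 2, 3, 4}: lane 0 only */
        __m128 p = _mm_add_ps(a, b); /* {6, 8, 10, 12}: all lanes */
        (void)s; (void)p;
    }
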
emmintrin.h
48 _mm_add_sd(__m128d __a, __m128d __b) in _mm_add_sd() argument
50 __a[0] += __b[0]; in _mm_add_sd()
55 _mm_add_pd(__m128d __a, __m128d __b) in _mm_add_pd() argument
57 return __a + __b; in _mm_add_pd()
61 _mm_sub_sd(__m128d __a, __m128d __b) in _mm_sub_sd() argument
63 __a[0] -= __b[0]; in _mm_sub_sd()
68 _mm_sub_pd(__m128d __a, __m128d __b) in _mm_sub_pd() argument
70 return __a - __b; in _mm_sub_pd()
74 _mm_mul_sd(__m128d __a, __m128d __b) in _mm_mul_sd() argument
76 __a[0] *= __b[0]; in _mm_mul_sd()
[all …]
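
emmintrin.h repeats the same scalar/packed pattern for double precision. A sketch of the scalar form, assuming an x86-64 target (where SSE2 is baseline):

    #include <emmintrin.h>

    double add_low(__m128d a, __m128d b) {
        /* Only lane 0 is summed; lane 1 of the result comes from a. */
        return _mm_cvtsd_f64(_mm_add_sd(a, b));
    }
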
tmmintrin.h
76 _mm_hadd_epi16(__m128i __a, __m128i __b) in _mm_hadd_epi16() argument
78 return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b); in _mm_hadd_epi16()
82 _mm_hadd_epi32(__m128i __a, __m128i __b) in _mm_hadd_epi32() argument
84 return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b); in _mm_hadd_epi32()
88 _mm_hadd_pi16(__m64 __a, __m64 __b) in _mm_hadd_pi16() argument
90 return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b); in _mm_hadd_pi16()
94 _mm_hadd_pi32(__m64 __a, __m64 __b) in _mm_hadd_pi32() argument
96 return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b); in _mm_hadd_pi32()
100 _mm_hadds_epi16(__m128i __a, __m128i __b) in _mm_hadds_epi16() argument
102 return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b); in _mm_hadds_epi16()
[all …]
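
The SSSE3 horizontal adds sum adjacent lane pairs: the low half of the result holds __a's pairwise sums and the high half holds __b's. A sketch, assuming an x86 target compiled with -mssse3:

    #include <tmmintrin.h>

    __m128i pairwise_sums(__m128i a, __m128i b) {
        /* a = {1,2,3,4,5,6,7,8} -> result low half = {3, 7, 11, 15} */
        return _mm_hadd_epi16(a, b);
    }
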
avxintrin.h
51 _mm256_add_pd(__m256d __a, __m256d __b) in _mm256_add_pd() argument
53 return __a+__b; in _mm256_add_pd()
57 _mm256_add_ps(__m256 __a, __m256 __b) in _mm256_add_ps() argument
59 return __a+__b; in _mm256_add_ps()
63 _mm256_sub_pd(__m256d __a, __m256d __b) in _mm256_sub_pd() argument
65 return __a-__b; in _mm256_sub_pd()
69 _mm256_sub_ps(__m256 __a, __m256 __b) in _mm256_sub_ps() argument
71 return __a-__b; in _mm256_sub_ps()
75 _mm256_addsub_pd(__m256d __a, __m256d __b) in _mm256_addsub_pd() argument
77 return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); in _mm256_addsub_pd()
[all …]
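
The AVX forms widen the same operations to 256 bits; note above that plain lane-wise add/sub is expressed as a C vector expression (__a + __b) while addsub needs a dedicated builtin. A sketch, assuming -mavx:

    #include <immintrin.h>

    __m256d add4(__m256d x, __m256d y) {
        return _mm256_add_pd(x, y); /* x[i] + y[i] for i = 0..3 */
    }
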
avx2intrin.h
56 _mm256_packs_epi16(__m256i __a, __m256i __b) in _mm256_packs_epi16() argument
58 return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b); in _mm256_packs_epi16()
62 _mm256_packs_epi32(__m256i __a, __m256i __b) in _mm256_packs_epi32() argument
64 return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b); in _mm256_packs_epi32()
68 _mm256_packus_epi16(__m256i __a, __m256i __b) in _mm256_packus_epi16() argument
70 return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b); in _mm256_packus_epi16()
80 _mm256_add_epi8(__m256i __a, __m256i __b) in _mm256_add_epi8() argument
82 return (__m256i)((__v32qi)__a + (__v32qi)__b); in _mm256_add_epi8()
86 _mm256_add_epi16(__m256i __a, __m256i __b) in _mm256_add_epi16() argument
88 return (__m256i)((__v16hi)__a + (__v16hi)__b); in _mm256_add_epi16()
[all …]
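
The AVX2 packs narrow 16-bit lanes to 8-bit with signed saturation, and they interleave per 128-bit lane (a-low, b-low, a-high, b-high) rather than across the whole register. A sketch, assuming -mavx2:

    #include <immintrin.h>

    __m256i narrow_sat(__m256i a, __m256i b) {
        return _mm256_packs_epi16(a, b); /* saturates each lane to [-128, 127] */
    }
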
avx512vlbwintrin.h
37 _mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) { in _mm_cmpeq_epi8_mask() argument
38 return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b, in _mm_cmpeq_epi8_mask()
43 _mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epi8_mask() argument
44 return (__mmask16)__builtin_ia32_pcmpeqb128_mask((__v16qi)__a, (__v16qi)__b, in _mm_mask_cmpeq_epi8_mask()
49 _mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) { in _mm_cmpeq_epu8_mask() argument
50 return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0, in _mm_cmpeq_epu8_mask()
55 _mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in _mm_mask_cmpeq_epu8_mask() argument
56 return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0, in _mm_mask_cmpeq_epu8_mask()
61 _mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) { in _mm256_cmpeq_epi8_mask() argument
62 return (__mmask32)__builtin_ia32_pcmpeqb256_mask((__v32qi)__a, (__v32qi)__b, in _mm256_cmpeq_epi8_mask()
[all …]
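
Unlike SSE/AVX compares, which produce a vector of all-ones/all-zeros lanes, the AVX-512 compares above return a compact bit-mask with one bit per lane. A sketch, assuming -mavx512bw -mavx512vl:

    #include <immintrin.h>

    int count_equal_bytes(__m128i a, __m128i b) {
        __mmask16 m = _mm_cmpeq_epi8_mask(a, b); /* bit i = (a[i] == b[i]) */
        return __builtin_popcount((unsigned)m);
    }
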
pmmintrin.h
39 _mm_addsub_ps(__m128 __a, __m128 __b) in _mm_addsub_ps() argument
41 return __builtin_ia32_addsubps(__a, __b); in _mm_addsub_ps()
45 _mm_hadd_ps(__m128 __a, __m128 __b) in _mm_hadd_ps() argument
47 return __builtin_ia32_haddps(__a, __b); in _mm_hadd_ps()
51 _mm_hsub_ps(__m128 __a, __m128 __b) in _mm_hsub_ps() argument
53 return __builtin_ia32_hsubps(__a, __b); in _mm_hsub_ps()
69 _mm_addsub_pd(__m128d __a, __m128d __b) in _mm_addsub_pd() argument
71 return __builtin_ia32_addsubpd(__a, __b); in _mm_addsub_pd()
75 _mm_hadd_pd(__m128d __a, __m128d __b) in _mm_hadd_pd() argument
77 return __builtin_ia32_haddpd(__a, __b); in _mm_hadd_pd()
[all …]
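
SSE3's addsub alternates per lane, subtracting in even lanes and adding in odd ones, which is the building block of interleaved complex multiplication. A sketch, assuming -msse3:

    #include <pmmintrin.h>

    __m128 alt_add_sub(__m128 a, __m128 b) {
        /* {a0-b0, a1+b1, a2-b2, a3+b3} */
        return _mm_addsub_ps(a, b);
    }
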
avx512bwintrin.h
62 _mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) { in _mm512_cmpeq_epi8_mask() argument
63 return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b, in _mm512_cmpeq_epi8_mask()
68 _mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epi8_mask() argument
69 return (__mmask64)__builtin_ia32_pcmpeqb512_mask((__v64qi)__a, (__v64qi)__b, in _mm512_mask_cmpeq_epi8_mask()
74 _mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) { in _mm512_cmpeq_epu8_mask() argument
75 return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0, in _mm512_cmpeq_epu8_mask()
80 _mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in _mm512_mask_cmpeq_epu8_mask() argument
81 return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0, in _mm512_mask_cmpeq_epu8_mask()
86 _mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) { in _mm512_cmpeq_epi16_mask() argument
87 return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b, in _mm512_cmpeq_epi16_mask()
[all …]
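
The _mask_ variants above take an extra __mmask64 that pre-filters which lanes participate; masked-off lanes contribute 0 bits to the result. A sketch, assuming -mavx512bw:

    #include <immintrin.h>

    __mmask64 equal_within(__mmask64 region, __m512i a, __m512i b) {
        return _mm512_mask_cmpeq_epi8_mask(region, a, b);
    }
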
/external/clang/test/CodeGen/
sse-builtins.c
169 __m128 test_mm_cmpeq_ss(__m128 __a, __m128 __b) { in test_mm_cmpeq_ss() argument
172 return _mm_cmpeq_ss(__a, __b); in test_mm_cmpeq_ss()
175 __m128 test_mm_cmplt_ss(__m128 __a, __m128 __b) { in test_mm_cmplt_ss() argument
178 return _mm_cmplt_ss(__a, __b); in test_mm_cmplt_ss()
181 __m128 test_mm_cmple_ss(__m128 __a, __m128 __b) { in test_mm_cmple_ss() argument
184 return _mm_cmple_ss(__a, __b); in test_mm_cmple_ss()
187 __m128 test_mm_cmpunord_ss(__m128 __a, __m128 __b) { in test_mm_cmpunord_ss() argument
190 return _mm_cmpunord_ss(__a, __b); in test_mm_cmpunord_ss()
193 __m128 test_mm_cmpneq_ss(__m128 __a, __m128 __b) { in test_mm_cmpneq_ss() argument
196 return _mm_cmpneq_ss(__a, __b); in test_mm_cmpneq_ss()
[all …]
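
These CodeGen tests compile each intrinsic call and FileCheck the emitted IR. A sketch of the pattern (the RUN and CHECK lines below are illustrative, not copied from the actual file):

    // RUN: %clang_cc1 -ffreestanding -triple x86_64-unknown-unknown \
    // RUN:   -target-feature +sse -emit-llvm %s -o - | FileCheck %s
    #include <xmmintrin.h>

    __m128 test_mm_cmpeq_ss(__m128 a, __m128 b) {
      // CHECK: call <4 x float> @llvm.x86.sse.cmp.ss
      return _mm_cmpeq_ss(a, b);
    }
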
avx512vlbw-builtins.c
9 __mmask32 test_mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) { in test_mm256_cmpeq_epi8_mask() argument
12 return (__mmask32)_mm256_cmpeq_epi8_mask(__a, __b); in test_mm256_cmpeq_epi8_mask()
15 __mmask32 test_mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { in test_mm256_mask_cmpeq_epi8_mask() argument
18 return (__mmask32)_mm256_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm256_mask_cmpeq_epi8_mask()
21 __mmask16 test_mm_cmpeq_epi8_mask(__m128i __a, __m128i __b) { in test_mm_cmpeq_epi8_mask() argument
24 return (__mmask16)_mm_cmpeq_epi8_mask(__a, __b); in test_mm_cmpeq_epi8_mask()
27 __mmask16 test_mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epi8_mask() argument
30 return (__mmask16)_mm_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm_mask_cmpeq_epi8_mask()
33 __mmask16 test_mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) { in test_mm256_cmpeq_epi16_mask() argument
36 return (__mmask16)_mm256_cmpeq_epi16_mask(__a, __b); in test_mm256_cmpeq_epi16_mask()
[all …]
avx512bw-builtins.c
9 __mmask64 test_mm512_cmpeq_epi8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi8_mask() argument
12 return (__mmask64)_mm512_cmpeq_epi8_mask(__a, __b); in test_mm512_cmpeq_epi8_mask()
15 __mmask64 test_mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi8_mask() argument
18 return (__mmask64)_mm512_mask_cmpeq_epi8_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi8_mask()
21 __mmask32 test_mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi16_mask() argument
24 return (__mmask32)_mm512_cmpeq_epi16_mask(__a, __b); in test_mm512_cmpeq_epi16_mask()
27 __mmask32 test_mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi16_mask() argument
30 return (__mmask32)_mm512_mask_cmpeq_epi16_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi16_mask()
33 __mmask64 test_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) { in test_mm512_cmpgt_epi8_mask() argument
36 return (__mmask64)_mm512_cmpgt_epi8_mask(__a, __b); in test_mm512_cmpgt_epi8_mask()
[all …]
avx512f-builtins.c
686 __mmask16 test_mm512_cmpeq_epi32_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi32_mask() argument
689 return (__mmask16)_mm512_cmpeq_epi32_mask(__a, __b); in test_mm512_cmpeq_epi32_mask()
692 __mmask16 test_mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi32_mask() argument
695 return (__mmask16)_mm512_mask_cmpeq_epi32_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi32_mask()
698 __mmask8 test_mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { in test_mm512_mask_cmpeq_epi64_mask() argument
701 return (__mmask8)_mm512_mask_cmpeq_epi64_mask(__u, __a, __b); in test_mm512_mask_cmpeq_epi64_mask()
704 __mmask8 test_mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b) { in test_mm512_cmpeq_epi64_mask() argument
707 return (__mmask8)_mm512_cmpeq_epi64_mask(__a, __b); in test_mm512_cmpeq_epi64_mask()
710 __mmask16 test_mm512_cmpgt_epi32_mask(__m512i __a, __m512i __b) { in test_mm512_cmpgt_epi32_mask() argument
713 return (__mmask16)_mm512_cmpgt_epi32_mask(__a, __b); in test_mm512_cmpgt_epi32_mask()
[all …]
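
Note how the mask width tracks the lane count: sixteen dword lanes in a 512-bit register yield a __mmask16, eight qword lanes a __mmask8. A sketch, assuming -mavx512f:

    #include <immintrin.h>

    __mmask16 eq32(__m512i a, __m512i b) { return _mm512_cmpeq_epi32_mask(a, b); }
    __mmask8  eq64(__m512i a, __m512i b) { return _mm512_cmpeq_epi64_mask(a, b); }
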
avx512vl-builtins.c
8 __mmask8 test_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) { in test_mm_cmpeq_epu32_mask() argument
11 return (__mmask8)_mm_cmpeq_epu32_mask(__a, __b); in test_mm_cmpeq_epu32_mask()
14 __mmask8 test_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epu32_mask() argument
17 return (__mmask8)_mm_mask_cmpeq_epu32_mask(__u, __a, __b); in test_mm_mask_cmpeq_epu32_mask()
20 __mmask8 test_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) { in test_mm_cmpeq_epu64_mask() argument
23 return (__mmask8)_mm_cmpeq_epu64_mask(__a, __b); in test_mm_cmpeq_epu64_mask()
26 __mmask8 test_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { in test_mm_mask_cmpeq_epu64_mask() argument
29 return (__mmask8)_mm_mask_cmpeq_epu64_mask(__u, __a, __b); in test_mm_mask_cmpeq_epu64_mask()
32 __mmask8 test_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) { in test_mm_cmpge_epi32_mask() argument
35 return (__mmask8)_mm_cmpge_epi32_mask(__a, __b); in test_mm_cmpge_epi32_mask()
[all …]
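
The epu forms above compare lanes as unsigned values, so 0xFFFFFFFF ranks above 1, unlike the signed epi forms. A sketch, assuming -mavx512f -mavx512vl:

    #include <immintrin.h>

    __mmask8 ge_unsigned(__m128i a, __m128i b) {
        return _mm_cmpge_epu32_mask(a, b);
    }
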
/external/clang/test/Sema/
builtins-x86.c
13 __m128 test__builtin_ia32_cmpps(__m128 __a, __m128 __b) { in test__builtin_ia32_cmpps() argument
14 __builtin_ia32_cmpps(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmpps()
17 __m128d test__builtin_ia32_cmppd(__m128d __a, __m128d __b) { in test__builtin_ia32_cmppd() argument
18 __builtin_ia32_cmppd(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmppd()
21 __m128 test__builtin_ia32_cmpss(__m128 __a, __m128 __b) { in test__builtin_ia32_cmpss() argument
22 __builtin_ia32_cmpss(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmpss()
25 __m128d test__builtin_ia32_cmpsd(__m128d __a, __m128d __b) { in test__builtin_ia32_cmpsd() argument
26 __builtin_ia32_cmpsd(__a, __b, 32); // expected-error {{argument should be a value from 0 to 31}} in test__builtin_ia32_cmpsd()
29 __mmask16 test__builtin_ia32_cmpps512_mask(__m512d __a, __m512d __b) { in test__builtin_ia32_cmpps512_mask() argument
30 …__builtin_ia32_cmpps512_mask(__a, __b, 32, -1, 0); // expected-error {{argument should be a value … in test__builtin_ia32_cmpps512_mask()
[all …]
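
These Sema tests verify that the comparison predicate immediate is range-checked at compile time: anything outside [0, 31] is rejected with the diagnostic shown. Using the named predicate macros keeps the immediate in range; a sketch, assuming -mavx for _mm_cmp_ps:

    #include <immintrin.h>

    __m128 cmp_le(__m128 a, __m128 b) {
        return _mm_cmp_ps(a, b, _CMP_LE_OQ); /* predicate 18, within 0..31 */
    }
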
/external/compiler-rt/lib/builtins/
mulsc3.c
21 __mulsc3(float __a, float __b, float __c, float __d) in __mulsc3() argument
24 float __bd = __b * __d; in __mulsc3()
26 float __bc = __b * __c; in __mulsc3()
33 if (crt_isinf(__a) || crt_isinf(__b)) in __mulsc3()
36 __b = crt_copysignf(crt_isinf(__b) ? 1 : 0, __b); in __mulsc3()
49 if (crt_isnan(__b)) in __mulsc3()
50 __b = crt_copysignf(0, __b); in __mulsc3()
58 if (crt_isnan(__b)) in __mulsc3()
59 __b = crt_copysignf(0, __b); in __mulsc3()
68 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); in __mulsc3()
[all …]
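
__mulsc3 is the compiler-rt runtime function for C99 Annex G complex float multiplication: the compiler lowers the * operator to a call so that the inf/nan recovery above runs. A sketch of source that typically produces such a call:

    #include <complex.h>

    float _Complex cmulf(float _Complex x, float _Complex y) {
        /* Typically lowered to __mulsc3(crealf(x), cimagf(x),
           crealf(y), cimagf(y)). */
        return x * y;
    }
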
mulxc3.c
23 __mulxc3(long double __a, long double __b, long double __c, long double __d) in __mulxc3() argument
26 long double __bd = __b * __d; in __mulxc3()
28 long double __bc = __b * __c; in __mulxc3()
35 if (crt_isinf(__a) || crt_isinf(__b)) in __mulxc3()
38 __b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b); in __mulxc3()
51 if (crt_isnan(__b)) in __mulxc3()
52 __b = crt_copysignl(0, __b); in __mulxc3()
60 if (crt_isnan(__b)) in __mulxc3()
61 __b = crt_copysignl(0, __b); in __mulxc3()
70 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); in __mulxc3()
[all …]
muldc3.c
21 __muldc3(double __a, double __b, double __c, double __d) in __muldc3() argument
24 double __bd = __b * __d; in __muldc3()
26 double __bc = __b * __c; in __muldc3()
33 if (crt_isinf(__a) || crt_isinf(__b)) in __muldc3()
36 __b = crt_copysign(crt_isinf(__b) ? 1 : 0, __b); in __muldc3()
49 if (crt_isnan(__b)) in __muldc3()
50 __b = crt_copysign(0, __b); in __muldc3()
58 if (crt_isnan(__b)) in __muldc3()
59 __b = crt_copysign(0, __b); in __muldc3()
68 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); in __muldc3()
[all …]
divtc3.c
21 __divtc3(long double __a, long double __b, long double __c, long double __d) in __divtc3() argument
33 __real__ z = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw); in __divtc3()
34 __imag__ z = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw); in __divtc3()
37 if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divtc3()
40 __imag__ z = crt_copysignl(CRT_INFINITY, __c) * __b; in __divtc3()
42 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divtc3()
46 __b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b); in __divtc3()
47 __real__ z = CRT_INFINITY * (__a * __c + __b * __d); in __divtc3()
48 __imag__ z = CRT_INFINITY * (__b * __c - __a * __d); in __divtc3()
51 crt_isfinite(__a) && crt_isfinite(__b)) in __divtc3()
[all …]
divdc3.c
21 __divdc3(double __a, double __b, double __c, double __d) in __divdc3() argument
33 COMPLEX_REAL(z) = crt_scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); in __divdc3()
34 COMPLEX_IMAGINARY(z) = crt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); in __divdc3()
37 if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divdc3()
40 COMPLEX_IMAGINARY(z) = crt_copysign(CRT_INFINITY, __c) * __b; in __divdc3()
42 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divdc3()
46 __b = crt_copysign(crt_isinf(__b) ? 1.0 : 0.0, __b); in __divdc3()
47 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d); in __divdc3()
48 COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d); in __divdc3()
51 crt_isfinite(__a) && crt_isfinite(__b)) in __divdc3()
[all …]
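
__divdc3 implements _Complex double division: it rescales with ilogb/scalbn so the denominator __c*__c + __d*__d cannot overflow before the textbook formula is applied, then patches up the inf/nan cases as above. A sketch of source that typically produces such a call:

    #include <complex.h>

    double _Complex cdiv(double _Complex x, double _Complex y) {
        /* Typically lowered to __divdc3(creal(x), cimag(x),
           creal(y), cimag(y)). */
        return x / y;
    }
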
divxc3.c
22 __divxc3(long double __a, long double __b, long double __c, long double __d) in __divxc3() argument
34 COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw); in __divxc3()
35 COMPLEX_IMAGINARY(z) = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw); in __divxc3()
38 if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divxc3()
41 COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b; in __divxc3()
43 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divxc3()
47 __b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b); in __divxc3()
48 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d); in __divxc3()
49 COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d); in __divxc3()
52 crt_isfinite(__a) && crt_isfinite(__b)) in __divxc3()
[all …]
divsc3.c
21 __divsc3(float __a, float __b, float __c, float __d) in __divsc3() argument
33 COMPLEX_REAL(z) = crt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw); in __divsc3()
34 COMPLEX_IMAGINARY(z) = crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw); in __divsc3()
37 if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) in __divsc3()
40 COMPLEX_IMAGINARY(z) = crt_copysignf(CRT_INFINITY, __c) * __b; in __divsc3()
42 else if ((crt_isinf(__a) || crt_isinf(__b)) && in __divsc3()
46 __b = crt_copysignf(crt_isinf(__b) ? 1 : 0, __b); in __divsc3()
47 COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d); in __divsc3()
48 COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d); in __divsc3()
51 crt_isfinite(__a) && crt_isfinite(__b)) in __divsc3()
[all …]
/external/llvm/test/CodeGen/PowerPC/
vsx-ldst-builtin-le.ll
22 %__b.addr.i32 = alloca <4 x i32>*, align 8
24 %__b.addr.i30 = alloca <4 x float>*, align 8
26 %__b.addr.i28 = alloca <2 x i64>*, align 8
28 %__b.addr.i26 = alloca <2 x i64>*, align 8
30 %__b.addr.i24 = alloca <2 x double>*, align 8
32 %__b.addr.i21 = alloca i32, align 4
35 %__b.addr.i18 = alloca i32, align 4
38 %__b.addr.i15 = alloca i32, align 4
41 %__b.addr.i12 = alloca i32, align 4
44 %__b.addr.i9 = alloca i32, align 4
[all …]
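
This IR was generated from clang's VSX load/store builtins; the many %__b.addr allocas are the unoptimized (-O0) spills of each intrinsic's __b parameter. A sketch of the kind of source involved, assuming a PowerPC target compiled with -mvsx (illustrative, not the actual test source):

    #include <altivec.h>

    void copy_one(const vector signed int *src, vector signed int *dst) {
        vector signed int v = vec_vsx_ld(0, src);
        vec_vsx_st(v, 0, dst);
    }
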
/external/llvm/test/CodeGen/AArch64/
arm64-vext.ll
8 %__b = alloca <8 x i8>, align 8
12 store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
14 %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
25 %__b = alloca <8 x i8>, align 8
29 store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
31 %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
42 %__b = alloca <8 x i8>, align 8
46 store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
48 %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
59 %__b = alloca <4 x i16>, align 8
[all …]
