/external/clang/test/CodeGen/ |
D | sse2-builtins.c |
    33 __m128d test_mm_add_pd(__m128d A, __m128d B) { in test_mm_add_pd()
    39 __m128d test_mm_add_sd(__m128d A, __m128d B) { in test_mm_add_sd()
    69 __m128d test_mm_and_pd(__m128d A, __m128d B) { in test_mm_and_pd()
    129 __m128d test_mm_cmpeq_pd(__m128d A, __m128d B) { in test_mm_cmpeq_pd()
    135 __m128d test_mm_cmpeq_sd(__m128d A, __m128d B) { in test_mm_cmpeq_sd()
    141 __m128d test_mm_cmpge_pd(__m128d A, __m128d B) { in test_mm_cmpge_pd()
    147 __m128d test_mm_cmpge_sd(__m128d A, __m128d B) { in test_mm_cmpge_sd()
    171 __m128d test_mm_cmpgt_pd(__m128d A, __m128d B) { in test_mm_cmpgt_pd()
    177 __m128d test_mm_cmpgt_sd(__m128d A, __m128d B) { in test_mm_cmpgt_sd()
    183 __m128d test_mm_cmple_pd(__m128d A, __m128d B) { in test_mm_cmple_pd()
    [all …]
|
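For reference, a minimal standalone sketch (not from the test file) of the packed-vs-scalar semantics these tests pin down: _mm_add_pd operates on both lanes, while _mm_add_sd touches only the low lane and passes the high lane of its first operand through.

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128d a = _mm_set_pd(2.0, 1.0);    /* high = 2.0, low = 1.0 */
        __m128d b = _mm_set_pd(20.0, 10.0);
        double out[2];

        _mm_storeu_pd(out, _mm_add_pd(a, b));
        printf("add_pd: low=%g high=%g\n", out[0], out[1]);  /* 11 22 */

        _mm_storeu_pd(out, _mm_add_sd(a, b));
        printf("add_sd: low=%g high=%g\n", out[0], out[1]);  /* 11 2 */
        return 0;
    }

The cmp*_pd intrinsics follow the same pattern, producing an all-ones or all-zeros bit mask per lane.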
D | sse-builtins.c |
    71 __m128d test_load1_pd(__m128 x, void* y) { in test_load1_pd()
    77 __m128d test_loadr_pd(__m128 x, void* y) { in test_loadr_pd()
    83 __m128d test_load_sd(void* y) { in test_load_sd()
    89 __m128d test_loadh_pd(__m128d x, void* y) { in test_loadh_pd()
    95 __m128d test_loadl_pd(__m128d x, void* y) { in test_loadl_pd()
    101 void test_store_sd(__m128d x, void* y) { in test_store_sd()
    107 void test_store1_pd(__m128d x, void* y) { in test_store1_pd()
    114 void test_storer_pd(__m128d x, void* y) { in test_storer_pd()
    120 void test_storeh_pd(__m128d x, void* y) { in test_storeh_pd()
    126 void test_storel_pd(__m128d x, void* y) { in test_storel_pd()
    [all …]
|
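A sketch of the double-precision load/store semantics those tests cover (not part of the test file; note that _mm_loadr_pd needs a 16-byte-aligned address):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        _Alignas(16) double mem[2] = {1.0, 2.0};
        double out[2];

        _mm_storeu_pd(out, _mm_load1_pd(mem));   /* broadcast: 1.0 1.0 */
        printf("load1: %g %g\n", out[0], out[1]);

        _mm_storeu_pd(out, _mm_loadr_pd(mem));   /* reversed: 2.0 1.0 */
        printf("loadr: %g %g\n", out[0], out[1]);

        __m128d x = _mm_setzero_pd();
        _mm_storeu_pd(out, _mm_loadh_pd(x, &mem[1]));  /* 0.0 2.0 */
        printf("loadh: %g %g\n", out[0], out[1]);

        _mm_storel_pd(out, x);                   /* write the low lane only */
        return 0;
    }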
D | fma4-builtins.c |
    14 __m128d test_mm_macc_pd(__m128d a, __m128d b, __m128d c) { in test_mm_macc_pd()
    26 __m128d test_mm_macc_sd(__m128d a, __m128d b, __m128d c) { in test_mm_macc_sd()
    38 __m128d test_mm_msub_pd(__m128d a, __m128d b, __m128d c) { in test_mm_msub_pd()
    50 __m128d test_mm_msub_sd(__m128d a, __m128d b, __m128d c) { in test_mm_msub_sd()
    62 __m128d test_mm_nmacc_pd(__m128d a, __m128d b, __m128d c) { in test_mm_nmacc_pd()
    74 __m128d test_mm_nmacc_sd(__m128d a, __m128d b, __m128d c) { in test_mm_nmacc_sd()
    86 __m128d test_mm_nmsub_pd(__m128d a, __m128d b, __m128d c) { in test_mm_nmsub_pd()
    98 __m128d test_mm_nmsub_sd(__m128d a, __m128d b, __m128d c) { in test_mm_nmsub_sd()
    110 __m128d test_mm_maddsub_pd(__m128d a, __m128d b, __m128d c) { in test_mm_maddsub_pd()
    122 __m128d test_mm_msubadd_pd(__m128d a, __m128d b, __m128d c) { in test_mm_msubadd_pd()
|
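The FMA4 intrinsics compute a fused multiply-accumulate with a single rounding step. A sketch of the two basic forms (FMA4 is AMD-only; build with something like clang -mfma4):

    #include <x86intrin.h>

    /* a*x + y, both lanes, single rounding */
    __m128d fused_axpy(__m128d a, __m128d x, __m128d y) {
        return _mm_macc_pd(a, x, y);
    }

    /* a*x - y */
    __m128d fused_axmy(__m128d a, __m128d x, __m128d y) {
        return _mm_msub_pd(a, x, y);
    }

The nmacc/nmsub variants negate the product, and maddsub/msubadd alternate the sign per lane.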
D | fma-builtins.c |
    13 __m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) { in test_mm_fmadd_pd()
    23 __m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) { in test_mm_fmadd_sd()
    33 __m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) { in test_mm_fmsub_pd()
    43 __m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) { in test_mm_fmsub_sd()
    53 __m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) { in test_mm_fnmadd_pd()
    63 __m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) { in test_mm_fnmadd_sd()
    73 __m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) { in test_mm_fnmsub_pd()
    83 __m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) { in test_mm_fnmsub_sd()
    93 __m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) { in test_mm_fmaddsub_pd()
    103 __m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) { in test_mm_fmsubadd_pd()
|
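The FMA3 names cover the same operations as FMA4 through the VEX fmadd/fmsub encodings. A sketch (build with -mfma):

    #include <immintrin.h>

    /* a*b + c with a single rounding */
    __m128d fma_pd(__m128d a, __m128d b, __m128d c) {
        return _mm_fmadd_pd(a, b, c);
    }

    /* -(a*b) + c */
    __m128d fnma_pd(__m128d a, __m128d b, __m128d c) {
        return _mm_fnmadd_pd(a, b, c);
    }

    /* per-lane alternation: lane0 = a*b - c, lane1 = a*b + c */
    __m128d fmaddsub_pd(__m128d a, __m128d b, __m128d c) {
        return _mm_fmaddsub_pd(a, b, c);
    }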
D | avx-cmp-builtins.c |
    13 __m128d test_cmp_pd(__m128d a, __m128d b) { in test_cmp_pd()
    19 __m128d test_cmp_ps(__m128 a, __m128 b) { in test_cmp_ps()
    37 __m128d test_cmp_sd(__m128d a, __m128d b) { in test_cmp_sd()
    43 __m128d test_cmp_ss(__m128 a, __m128 b) { in test_cmp_ss()
    73 __m128d test_cmpgt_sd(__m128d a, __m128d b) { in test_cmpgt_sd()
    79 __m128d test_cmpge_sd(__m128d a, __m128d b) { in test_cmpge_sd()
    85 __m128d test_cmpngt_sd(__m128d a, __m128d b) { in test_cmpngt_sd()
    91 __m128d test_cmpnge_sd(__m128d a, __m128d b) { in test_cmpnge_sd()
|
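These tests exist because AVX folds the whole cmpge/cmpgt/cmpngt/cmpnge family into one intrinsic taking an immediate predicate. A sketch (build with -mavx):

    #include <immintrin.h>

    /* all-ones in each lane where a >= b, all-zeros elsewhere */
    __m128d greater_equal(__m128d a, __m128d b) {
        return _mm_cmp_pd(a, b, _CMP_GE_OQ);
    }

    /* scalar form: compares the low lanes, passes a's high lane through */
    __m128d not_greater_scalar(__m128d a, __m128d b) {
        return _mm_cmp_sd(a, b, _CMP_NGT_US);
    }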
D | avx512vldq-builtins.c |
    56 __m128d test_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_andnot_pd()
    59 return (__m128d) _mm_mask_andnot_pd ( __W, __U, __A, __B); in test_mm_mask_andnot_pd()
    62 __m128d test_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) { in test_mm_maskz_andnot_pd()
    65 return (__m128d) _mm_maskz_andnot_pd (__U, __A, __B); in test_mm_maskz_andnot_pd()
    104 __m128d test_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_and_pd()
    107 return (__m128d) _mm_mask_and_pd ( __W, __U, __A, __B); in test_mm_mask_and_pd()
    110 __m128d test_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) { in test_mm_maskz_and_pd()
    113 return (__m128d) _mm_maskz_and_pd (__U, __A, __B); in test_mm_maskz_and_pd()
    152 __m128d test_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_xor_pd()
    155 return (__m128d) _mm_mask_xor_pd ( __W, __U, __A, __B); in test_mm_mask_xor_pd()
    [all …]
|
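What the mask_/maskz_ prefixes mean here, as a sketch (build with -mavx512vl -mavx512dq): lanes whose mask bit is clear take the value of the passthrough operand __W, or 0.0 in the zeroing form.

    #include <immintrin.h>

    /* mask bit set: a & b; clear: w */
    __m128d masked_and(__m128d w, __mmask8 u, __m128d a, __m128d b) {
        return _mm_mask_and_pd(w, u, a, b);
    }

    /* mask bit set: (~a) & b; clear: 0.0 */
    __m128d zeroing_andnot(__mmask8 u, __m128d a, __m128d b) {
        return _mm_maskz_andnot_pd(u, a, b);
    }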
D | sse41-builtins.c |
    15 __m128d test_mm_blend_pd(__m128d V1, __m128d V2) { in test_mm_blend_pd()
    33 __m128d test_mm_blendv_pd(__m128d V1, __m128d V2, __m128d V3) { in test_mm_blendv_pd()
    45 __m128d test_mm_ceil_pd(__m128d x) { in test_mm_ceil_pd()
    57 __m128d test_mm_ceil_sd(__m128d x, __m128d y) { in test_mm_ceil_sd()
    147 __m128d test_mm_dp_pd(__m128d x, __m128d y) { in test_mm_dp_pd()
    182 __m128d test_mm_floor_pd(__m128d x) { in test_mm_floor_pd()
    194 __m128d test_mm_floor_sd(__m128d x, __m128d y) { in test_mm_floor_sd()
    308 __m128d test_mm_round_pd(__m128d x) { in test_mm_round_pd()
    320 __m128d test_mm_round_sd(__m128d x, __m128d y) { in test_mm_round_sd()
|
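A sketch of the three SSE4.1 double-precision groups exercised above: immediate blends, explicit rounding, and the dot product with its two-part selector mask (build with -msse4.1).

    #include <smmintrin.h>

    /* immediate bit i selects lane i from v2; 0x1 takes v2's low lane */
    __m128d mix(__m128d v1, __m128d v2) {
        return _mm_blend_pd(v1, v2, 0x1);
    }

    __m128d round_nearest(__m128d x) {
        return _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }

    /* 0x31: multiply both lanes, sum them, write the sum to lane 0 only */
    __m128d dot2(__m128d x, __m128d y) {
        return _mm_dp_pd(x, y, 0x31);
    }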
D | sse3-builtins.c |
    8 __m128d test_mm_addsub_pd(__m128d A, __m128d B) { in test_mm_addsub_pd()
    20 __m128d test_mm_hadd_pd(__m128d A, __m128d B) { in test_mm_hadd_pd()
    32 __m128d test_mm_hsub_pd(__m128d A, __m128d B) { in test_mm_hsub_pd()
    50 __m128d test_mm_loaddup_pd(double const* P) { in test_mm_loaddup_pd()
    56 __m128d test_mm_movedup_pd(__m128d A) { in test_mm_movedup_pd()
|
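The SSE3 double-precision additions in one sketch (build with -msse3):

    #include <pmmintrin.h>

    /* lane0 = a0 - b0, lane1 = a1 + b1 (handy for complex multiplies) */
    __m128d addsub(__m128d a, __m128d b) {
        return _mm_addsub_pd(a, b);
    }

    /* horizontal add: { a0 + a1, b0 + b1 } */
    __m128d hadd(__m128d a, __m128d b) {
        return _mm_hadd_pd(a, b);
    }

    /* duplicate the low lane: { a0, a0 } */
    __m128d dup_low(__m128d a) {
        return _mm_movedup_pd(a);
    }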
D | avx512er-builtins.c |
    116 __m128d test_mm_rsqrt28_round_sd(__m128d a, __m128d b) { in test_mm_rsqrt28_round_sd()
    122 __m128d test_mm_mask_rsqrt28_round_sd(__m128d s, __mmask8 m, __m128d a, __m128d b) { in test_mm_mask_rsqrt28_round_sd()
    128 __m128d test_mm_maskz_rsqrt28_round_sd(__mmask8 m, __m128d a, __m128d b) { in test_mm_maskz_rsqrt28_round_sd()
    242 __m128d test_mm_rcp28_round_sd(__m128d a, __m128d b) { in test_mm_rcp28_round_sd()
    248 __m128d test_mm_mask_rcp28_round_sd(__m128d s, __mmask8 m, __m128d a, __m128d b) { in test_mm_mask_rcp28_round_sd()
    254 __m128d test_mm_maskz_rcp28_round_sd(__mmask8 m, __m128d a, __m128d b) { in test_mm_maskz_rcp28_round_sd()
    260 __m128d test_mm_rcp28_sd(__m128d a, __m128d b) { in test_mm_rcp28_sd()
    266 __m128d test_mm_mask_rcp28_sd(__m128d s, __mmask8 m, __m128d a, __m128d b) { in test_mm_mask_rcp28_sd()
    272 __m128d test_mm_maskz_rcp28_sd(__mmask8 m, __m128d a, __m128d b) { in test_mm_maskz_rcp28_sd()
|
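The ER intrinsics give reciprocal approximations accurate to 2^-28; the scalar forms compute on b's low lane and pass a's high lane through. A sketch (AVX-512ER shipped only on Xeon Phi; build with -mavx512er):

    #include <immintrin.h>

    __m128d approx_rsqrt_sd(__m128d a, __m128d b) {
        return _mm_rsqrt28_round_sd(a, b, _MM_FROUND_NO_EXC);
    }

    __m128d approx_rcp_sd(__m128d a, __m128d b) {
        return _mm_rcp28_round_sd(a, b, _MM_FROUND_NO_EXC);
    }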
D | avx512vl-builtins.c |
    1020 __mmask8 test_mm128_cmp_pd_mask(__m128d __A, __m128d __B) { in test_mm128_cmp_pd_mask()
    1026 __mmask8 test_mm128_mask_cmp_pd_mask(__mmask8 m, __m128d __A, __m128d __B) { in test_mm128_mask_cmp_pd_mask()
    1035 __m128d test_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { in test_mm_mask_fmadd_pd()
    1041 __m128d test_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) { in test_mm_mask_fmsub_pd()
    1047 __m128d test_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { in test_mm_mask3_fmadd_pd()
    1053 __m128d test_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) { in test_mm_mask3_fnmadd_pd()
    1059 __m128d test_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { in test_mm_maskz_fmadd_pd()
    1065 __m128d test_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { in test_mm_maskz_fmsub_pd()
    1071 __m128d test_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { in test_mm_maskz_fnmadd_pd()
    1077 __m128d test_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) { in test_mm_maskz_fnmsub_pd()
    [all …]
|
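The three masking flavors of the AVX-512 FMA intrinsics differ only in what lands in lanes whose mask bit is clear. A sketch (build with -mavx512vl):

    #include <immintrin.h>

    /* mask bit clear: keep a (the first multiplicand) */
    __m128d fma_keep_a(__m128d a, __mmask8 k, __m128d b, __m128d c) {
        return _mm_mask_fmadd_pd(a, k, b, c);
    }

    /* mask bit clear: keep c (the accumulator) */
    __m128d fma_keep_c(__m128d a, __m128d b, __m128d c, __mmask8 k) {
        return _mm_mask3_fmadd_pd(a, b, c, k);
    }

    /* mask bit clear: zero the lane */
    __m128d fma_zero(__mmask8 k, __m128d a, __m128d b, __m128d c) {
        return _mm_maskz_fmadd_pd(k, a, b, c);
    }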
D | avx512f-builtins.c |
    190 __m512d test_mm512_broadcastsd_pd(__m128d a) in test_mm512_broadcastsd_pd()
    1453 __m128d test_mm_add_round_sd(__m128d __A, __m128d __B) { in test_mm_add_round_sd()
    1458 __m128d test_mm_mask_add_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_add_round_sd()
    1463 __m128d test_mm_maskz_add_round_sd(__mmask8 __U, __m128d __A, __m128d __B) { in test_mm_maskz_add_round_sd()
    1468 __m128d test_mm_mask_add_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_add_sd()
    1473 __m128d test_mm_maskz_add_sd(__mmask8 __U, __m128d __A, __m128d __B) { in test_mm_maskz_add_sd()
    1553 __m128d test_mm_sub_round_sd(__m128d __A, __m128d __B) { in test_mm_sub_round_sd()
    1558 __m128d test_mm_mask_sub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_sub_round_sd()
    1563 __m128d test_mm_maskz_sub_round_sd(__mmask8 __U, __m128d __A, __m128d __B) { in test_mm_maskz_sub_round_sd()
    1568 __m128d test_mm_mask_sub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in test_mm_mask_sub_sd()
    [all …]
|
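The _round_ variants embed a rounding mode in the instruction itself, overriding MXCSR for that one operation. A sketch (build with -mavx512f):

    #include <immintrin.h>

    /* low lane: a0 + b0 rounded toward zero, exceptions suppressed;
       high lane: a1 */
    __m128d add_sd_rz(__m128d a, __m128d b) {
        return _mm_add_round_sd(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    }

    /* low lane: a0 + b0 if mask bit 0 is set, else w0; high lane: a1 */
    __m128d masked_add_sd(__m128d w, __mmask8 u, __m128d a, __m128d b) {
        return _mm_mask_add_sd(w, u, a, b);
    }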
D | avx-shuffle-builtins.c |
    19 __m128d test_mm_permute_pd(__m128d a) { in test_mm_permute_pd()
    110 __m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) { in test_mm256_insertf128_pd_0()
    128 __m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) { in test_mm256_insertf128_pd_1()
    148 __m128d test_mm256_extractf128_pd_0(__m256d a) { in test_mm256_extractf128_pd_0()
    166 __m128d test_mm256_extractf128_pd_1(__m256d a) { in test_mm256_extractf128_pd_1()
    184 __m256d test_mm256_set_m128d(__m128d hi, __m128d lo) { in test_mm256_set_m128d()
    202 __m256d test_mm256_setr_m128d(__m128d hi, __m128d lo) { in test_mm256_setr_m128d()
|
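A sketch of the 128/256-bit lane plumbing these tests cover (build with -mavx):

    #include <immintrin.h>

    /* immediate bit i picks the source lane for result lane i; 0x1 swaps */
    __m128d swap_lanes(__m128d a) {
        return _mm_permute_pd(a, 0x1);
    }

    /* replace the low or high 128-bit half of a 256-bit vector */
    __m256d replace_low(__m256d a, __m128d b) {
        return _mm256_insertf128_pd(a, b, 0);
    }

    __m128d take_high(__m256d a) {
        return _mm256_extractf128_pd(a, 1);
    }

    /* build a 256-bit vector from two halves */
    __m256d combine(__m128d hi, __m128d lo) {
        return _mm256_set_m128d(hi, lo);
    }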
/external/clang/lib/Headers/ |
D | emmintrin.h |
    29 typedef double __m128d __attribute__((__vector_size__(16))); typedef
    47 static __inline__ __m128d __DEFAULT_FN_ATTRS
    48 _mm_add_sd(__m128d __a, __m128d __b) in _mm_add_sd()
    54 static __inline__ __m128d __DEFAULT_FN_ATTRS
    55 _mm_add_pd(__m128d __a, __m128d __b) in _mm_add_pd()
    60 static __inline__ __m128d __DEFAULT_FN_ATTRS
    61 _mm_sub_sd(__m128d __a, __m128d __b) in _mm_sub_sd()
    67 static __inline__ __m128d __DEFAULT_FN_ATTRS
    68 _mm_sub_pd(__m128d __a, __m128d __b) in _mm_sub_pd()
    73 static __inline__ __m128d __DEFAULT_FN_ATTRS
    [all …]
|
D | fma4intrin.h |
    42 static __inline__ __m128d __DEFAULT_FN_ATTRS
    43 _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_macc_pd()
    45 return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C); in _mm_macc_pd()
    54 static __inline__ __m128d __DEFAULT_FN_ATTRS
    55 _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C) in _mm_macc_sd()
    57 return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C); in _mm_macc_sd()
    66 static __inline__ __m128d __DEFAULT_FN_ATTRS
    67 _mm_msub_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_msub_pd()
    69 return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C); in _mm_msub_pd()
    78 static __inline__ __m128d __DEFAULT_FN_ATTRS
    [all …]
|
D | fmaintrin.h |
    40 static __inline__ __m128d __DEFAULT_FN_ATTRS
    41 _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmadd_pd()
    43 return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C); in _mm_fmadd_pd()
    52 static __inline__ __m128d __DEFAULT_FN_ATTRS
    53 _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmadd_sd()
    55 return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C); in _mm_fmadd_sd()
    64 static __inline__ __m128d __DEFAULT_FN_ATTRS
    65 _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) in _mm_fmsub_pd()
    67 return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C); in _mm_fmsub_pd()
    76 static __inline__ __m128d __DEFAULT_FN_ATTRS
    [all …]
|
D | avx512vldqintrin.h |
    95 static __inline__ __m128d __DEFAULT_FN_ATTRS
    96 _mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in _mm_mask_andnot_pd()
    97 return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, in _mm_mask_andnot_pd()
    103 static __inline__ __m128d __DEFAULT_FN_ATTRS
    104 _mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) { in _mm_maskz_andnot_pd()
    105 return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, in _mm_maskz_andnot_pd()
    163 static __inline__ __m128d __DEFAULT_FN_ATTRS
    164 _mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { in _mm_mask_and_pd()
    165 return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, in _mm_mask_and_pd()
    171 static __inline__ __m128d __DEFAULT_FN_ATTRS
    [all …]
|
D | pmmintrin.h |
    68 static __inline__ __m128d __DEFAULT_FN_ATTRS
    69 _mm_addsub_pd(__m128d __a, __m128d __b) in _mm_addsub_pd()
    74 static __inline__ __m128d __DEFAULT_FN_ATTRS
    75 _mm_hadd_pd(__m128d __a, __m128d __b) in _mm_hadd_pd()
    80 static __inline__ __m128d __DEFAULT_FN_ATTRS
    81 _mm_hsub_pd(__m128d __a, __m128d __b) in _mm_hsub_pd()
    88 static __inline__ __m128d __DEFAULT_FN_ATTRS
    89 _mm_movedup_pd(__m128d __a) in _mm_movedup_pd()
|
D | avx512vlintrin.h |
    1320 static __inline__ __m128d __DEFAULT_FN_ATTRS
    1321 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) in _mm_mask_fmadd_pd()
    1323 return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A, in _mm_mask_fmadd_pd()
    1329 static __inline__ __m128d __DEFAULT_FN_ATTRS
    1330 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) in _mm_mask3_fmadd_pd()
    1332 return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A, in _mm_mask3_fmadd_pd()
    1338 static __inline__ __m128d __DEFAULT_FN_ATTRS
    1339 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) in _mm_maskz_fmadd_pd()
    1341 return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A, in _mm_maskz_fmadd_pd()
    1347 static __inline__ __m128d __DEFAULT_FN_ATTRS
    [all …]
|
D | avx512erintrin.h |
    156 (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
    157 (__v2df)(__m128d)(B), \
    162 (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
    163 (__v2df)(__m128d)(B), \
    164 (__v2df)(__m128d)(S), \
    168 (__m128d)__builtin_ia32_rsqrt28sd_round((__v2df)(__m128d)(A), \
    169 (__v2df)(__m128d)(B), \
    259 (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
    260 (__v2df)(__m128d)(B), \
    265 (__m128d)__builtin_ia32_rcp28sd_round((__v2df)(__m128d)(A), \
    [all …]
|
D | smmintrin.h |
    67 (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
    70 (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
    71 (__v2df)(__m128d)(Y), (M)); })
    75 (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
    76 (__v2df)(__m128d)(V2), \
    87 static __inline__ __m128d __DEFAULT_FN_ATTRS
    88 _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M) in _mm_blendv_pd()
    90 return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2, in _mm_blendv_pd()
    139 (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
    140 (__v2df)(__m128d)(Y), (M)); })
|
D | avxintrin.h |
    244 static __inline __m128d __DEFAULT_FN_ATTRS
    245 _mm_permutevar_pd(__m128d __a, __m128i __c) in _mm_permutevar_pd()
    247 return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c); in _mm_permutevar_pd()
    269 (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
    404 (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
    405 (__v2df)(__m128d)(b), (c)); })
    420 (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
    421 (__v2df)(__m128d)(b), (c)); })
    586 _mm_testz_pd(__m128d __a, __m128d __b) in _mm_testz_pd()
    592 _mm_testc_pd(__m128d __a, __m128d __b) in _mm_testc_pd()
    [all …]
|
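Two entries above that the CodeGen tests don't illustrate: variable permutes, which read their lane selectors from a vector at run time (bit 1 of each 64-bit element), and the pd test ops, which look only at sign bits. A sketch (build with -mavx):

    #include <immintrin.h>

    __m128d permute_runtime(__m128d a, __m128i sel) {
        return _mm_permutevar_pd(a, sel);
    }

    /* returns 1 iff no lane has the sign bit set in both a and b */
    int testz(__m128d a, __m128d b) {
        return _mm_testz_pd(a, b);
    }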
/external/libvpx/libvpx/vpx_ports/ |
D | emmintrin_compat.h |
    19 _mm_castpd_ps(__m128d __A) in _mm_castpd_ps()
    25 _mm_castpd_si128(__m128d __A) in _mm_castpd_si128()
    30 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    33 return (__m128d) __A; in _mm_castps_pd()
    48 extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    51 return (__m128d) __A; in _mm_castsi128_pd()
|
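These shims backfill the _mm_cast* family for old GCCs that lack it. The casts reinterpret the same 128 bits between float, double, and integer views; no conversion happens and no instruction is emitted. Minimal sketch of the equivalent intrinsics:

    #include <emmintrin.h>

    /* raw IEEE-754 bit pattern of both lanes, unchanged */
    static __m128i double_bits(__m128d x) {
        return _mm_castpd_si128(x);
    }

    static __m128d bits_as_double(__m128i x) {
        return _mm_castsi128_pd(x);
    }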
/external/clang/test/Sema/ |
D | builtins-x86.c |
    5 typedef double __m128d __attribute__((__vector_size__(16))); typedef
    17 __m128d test__builtin_ia32_cmppd(__m128d __a, __m128d __b) { in test__builtin_ia32_cmppd()
    25 __m128d test__builtin_ia32_cmpsd(__m128d __a, __m128d __b) { in test__builtin_ia32_cmpsd()
|
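The point of this Sema test is that the cmppd/cmpsd builtins take their predicate as an immediate, so clang rejects a non-constant selector at compile time. A sketch of the constraint (not the test itself; assumes an x86 target with SSE2 enabled):

    typedef double __m128d __attribute__((__vector_size__(16)));

    __m128d ok(__m128d a, __m128d b) {
        return __builtin_ia32_cmppd(a, b, 0);    /* constant predicate: accepted */
    }

    /* __m128d bad(__m128d a, __m128d b, int p) {
     *     return __builtin_ia32_cmppd(a, b, p); // rejected: argument must be a constant
     * } */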
/external/opencv3/modules/hal/src/ |
D | mathfuncs.cpp |
    272 __m128d v_1 = _mm_set1_pd(1.0); in invSqrt()
    463 static const __m128d prescale2 = _mm_set1_pd(exp_prescale); in exp()
    486 __m128d xd0 = _mm_cvtps_pd(xf0); in exp()
    487 __m128d xd2 = _mm_cvtps_pd(_mm_movehl_ps(xf0, xf0)); in exp()
    488 __m128d xd1 = _mm_cvtps_pd(xf1); in exp()
    489 __m128d xd3 = _mm_cvtps_pd(_mm_movehl_ps(xf1, xf1)); in exp()
    525 … __m128d yd0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1])); in exp()
    526 … __m128d yd1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3])); in exp()
    527 … __m128d yd2 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[4]), _mm_load_sd(expTab + tab_idx[5])); in exp()
    528 … __m128d yd3 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[6]), _mm_load_sd(expTab + tab_idx[7])); in exp()
    [all …]
|
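Two idioms worth noting from the exp() kernel above, shown with hypothetical helper names: widening four packed floats to four doubles (movehl exposes the upper float pair to cvtps_pd), and gathering two table entries into one __m128d.

    #include <emmintrin.h>

    static void widen4(__m128 xf, __m128d *lo, __m128d *hi) {
        *lo = _mm_cvtps_pd(xf);                     /* elements 0,1 */
        *hi = _mm_cvtps_pd(_mm_movehl_ps(xf, xf));  /* elements 2,3 */
    }

    static __m128d gather2(const double *tab, int i0, int i1) {
        /* { tab[i0], tab[i1] } */
        return _mm_unpacklo_pd(_mm_load_sd(tab + i0), _mm_load_sd(tab + i1));
    }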
/external/speex/libspeex/ |
D | resample_sse.h |
    82 __m128d sum = _mm_setzero_pd(); in inner_product_double()
    94 sum = _mm_add_sd(sum, (__m128d) _mm_movehl_ps((__m128) sum, (__m128) sum)); in inner_product_double()
    103 __m128d sum; in interpolate_product_double()
    104 __m128d sum1 = _mm_setzero_pd(); in interpolate_product_double()
    105 __m128d sum2 = _mm_setzero_pd(); in interpolate_product_double()
    107 __m128d f1 = _mm_cvtps_pd(f); in interpolate_product_double()
    108 __m128d f2 = _mm_cvtps_pd(_mm_movehl_ps(f,f)); in interpolate_product_double()
    123 sum = _mm_add_sd(sum, (__m128d) _mm_movehl_ps((__m128) sum, (__m128) sum)); in interpolate_product_double()
|
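The reduction step in inner_product_double and interpolate_product_double is a standard horizontal sum: view the accumulator as floats so movehl can fold the high double onto the low lane, add, then extract. As a standalone helper (sketch):

    #include <emmintrin.h>

    static double hsum_pd(__m128d sum) {
        sum = _mm_add_sd(sum, (__m128d)_mm_movehl_ps((__m128)sum, (__m128)sum));
        return _mm_cvtsd_f64(sum);
    }

_mm_unpackhi_pd(sum, sum) would do the same fold without the casts; the cast form is what the resampler itself uses.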