
Searched refs:__m128 (Results 1 – 25 of 140) sorted by relevance


/external/clang/test/CodeGen/
sse-builtins.c
10 __m128 test_mm_add_ps(__m128 A, __m128 B) { in test_mm_add_ps()
16 __m128 test_mm_add_ss(__m128 A, __m128 B) { in test_mm_add_ss()
25 __m128 test_mm_and_ps(__m128 A, __m128 B) { in test_mm_and_ps()
31 __m128 test_mm_andnot_ps(__m128 A, __m128 B) { in test_mm_andnot_ps()
38 __m128 test_mm_cmpeq_ps(__m128 __a, __m128 __b) { in test_mm_cmpeq_ps()
47 __m128 test_mm_cmpeq_ss(__m128 __a, __m128 __b) { in test_mm_cmpeq_ss()
53 __m128 test_mm_cmpge_ps(__m128 __a, __m128 __b) { in test_mm_cmpge_ps()
62 __m128 test_mm_cmpge_ss(__m128 __a, __m128 __b) { in test_mm_cmpge_ss()
69 __m128 test_mm_cmpgt_ps(__m128 __a, __m128 __b) { in test_mm_cmpgt_ps()
78 __m128 test_mm_cmpgt_ss(__m128 __a, __m128 __b) { in test_mm_cmpgt_ss()
[all …]
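These tests pin down the codegen for the basic packed/scalar arithmetic intrinsics from xmmintrin.h. As a minimal usage sketch (the main() driver below is illustrative, not part of the test file): _mm_add_ps adds all four float lanes, while _mm_add_ss adds only lane 0 and passes the upper three lanes of the first operand through.

#include <xmmintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* set_ps lists lanes high-to-low: a = {1,2,3,4} */
  __m128 b = _mm_set1_ps(10.0f);                 /* broadcast: b = {10,10,10,10} */
  float p[4], s[4];
  _mm_storeu_ps(p, _mm_add_ps(a, b));            /* packed: {11,12,13,14} */
  _mm_storeu_ps(s, _mm_add_ss(a, b));            /* scalar: {11,2,3,4} */
  printf("add_ps: %g %g %g %g\n", p[0], p[1], p[2], p[3]);
  printf("add_ss: %g %g %g %g\n", s[0], s[1], s[2], s[3]);
  return 0;
}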
fma-builtins.c
8 __m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) { in test_mm_fmadd_ps()
18 __m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) { in test_mm_fmadd_ss()
28 __m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) { in test_mm_fmsub_ps()
38 __m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) { in test_mm_fmsub_ss()
48 __m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) { in test_mm_fnmadd_ps()
58 __m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) { in test_mm_fnmadd_ss()
68 __m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) { in test_mm_fnmsub_ps()
78 __m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) { in test_mm_fnmsub_ss()
88 __m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) { in test_mm_fmaddsub_ps()
98 __m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) { in test_mm_fmsubadd_ps()
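The FMA3 intrinsics compute a*b ± c with a single rounding per lane. A hedged sketch of their use, assuming compilation with -mfma (the names axpy4 and residual4 are illustrative):

#include <immintrin.h>

__m128 axpy4(__m128 a, __m128 x, __m128 y) {
  return _mm_fmadd_ps(a, x, y);   /* a*x + y, fused: one rounding step */
}

__m128 residual4(__m128 a, __m128 x, __m128 y) {
  return _mm_fmsub_ps(a, x, y);   /* a*x - y */
}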
fma4-builtins.c
8 __m128 test_mm_macc_ps(__m128 a, __m128 b, __m128 c) { in test_mm_macc_ps()
20 __m128 test_mm_macc_ss(__m128 a, __m128 b, __m128 c) { in test_mm_macc_ss()
32 __m128 test_mm_msub_ps(__m128 a, __m128 b, __m128 c) { in test_mm_msub_ps()
44 __m128 test_mm_msub_ss(__m128 a, __m128 b, __m128 c) { in test_mm_msub_ss()
56 __m128 test_mm_nmacc_ps(__m128 a, __m128 b, __m128 c) { in test_mm_nmacc_ps()
68 __m128 test_mm_nmacc_ss(__m128 a, __m128 b, __m128 c) { in test_mm_nmacc_ss()
80 __m128 test_mm_nmsub_ps(__m128 a, __m128 b, __m128 c) { in test_mm_nmsub_ps()
92 __m128 test_mm_nmsub_ss(__m128 a, __m128 b, __m128 c) { in test_mm_nmsub_ss()
104 __m128 test_mm_maddsub_ps(__m128 a, __m128 b, __m128 c) { in test_mm_maddsub_ps()
116 __m128 test_mm_msubadd_ps(__m128 a, __m128 b, __m128 c) { in test_mm_msubadd_ps()
avx512vldq-builtins.c
80 __m128 test_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in test_mm_mask_andnot_ps()
83 return (__m128) _mm_mask_andnot_ps ( __W, __U, __A, __B); in test_mm_mask_andnot_ps()
86 __m128 test_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) { in test_mm_maskz_andnot_ps()
89 return (__m128) _mm_maskz_andnot_ps (__U, __A, __B); in test_mm_maskz_andnot_ps()
128 __m128 test_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in test_mm_mask_and_ps()
131 return (__m128) _mm_mask_and_ps ( __W, __U, __A, __B); in test_mm_mask_and_ps()
134 __m128 test_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) { in test_mm_maskz_and_ps()
137 return (__m128) _mm_maskz_and_ps (__U, __A, __B); in test_mm_maskz_and_ps()
176 __m128 test_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in test_mm_mask_xor_ps()
179 return (__m128) _mm_mask_xor_ps ( __W, __U, __A, __B); in test_mm_mask_xor_ps()
[all …]
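The masked AVX-512VL/DQ forms take an 8-bit writemask: the _mask_ variant copies lanes from __W where the mask bit is clear (merge-masking), while _maskz_ zeroes them (zero-masking). A hedged sketch, assuming -mavx512vl -mavx512dq:

#include <immintrin.h>

__m128 and_merge(__m128 src, __mmask8 k, __m128 a, __m128 b) {
  return _mm_mask_and_ps(src, k, a, b);  /* lane i = (k>>i)&1 ? a&b : src */
}

__m128 and_zero(__mmask8 k, __m128 a, __m128 b) {
  return _mm_maskz_and_ps(k, a, b);      /* lane i = (k>>i)&1 ? a&b : 0.0f */
}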
avx512er-builtins.c
80 __m128 test_mm_rsqrt28_round_ss(__m128 a, __m128 b) { in test_mm_rsqrt28_round_ss()
86 __m128 test_mm_mask_rsqrt28_round_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) { in test_mm_mask_rsqrt28_round_ss()
92 __m128 test_mm_maskz_rsqrt28_round_ss(__mmask16 m, __m128 a, __m128 b) { in test_mm_maskz_rsqrt28_round_ss()
98 __m128 test_mm_rsqrt28_ss(__m128 a, __m128 b) { in test_mm_rsqrt28_ss()
104 __m128 test_mm_mask_rsqrt28_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) { in test_mm_mask_rsqrt28_ss()
110 __m128 test_mm_maskz_rsqrt28_ss(__mmask16 m, __m128 a, __m128 b) { in test_mm_maskz_rsqrt28_ss()
206 __m128 test_mm_rcp28_round_ss(__m128 a, __m128 b) { in test_mm_rcp28_round_ss()
212 __m128 test_mm_mask_rcp28_round_ss(__m128 s, __mmask16 m, __m128 a, __m128 b) { in test_mm_mask_rcp28_round_ss()
218 __m128 test_mm_maskz_rcp28_round_ss(__mmask16 m, __m128 a, __m128 b) { in test_mm_maskz_rcp28_round_ss()
224 __m128 test_mm_rcp28_ss(__m128 a, __m128 b) { in test_mm_rcp28_ss()
[all …]
sse41-builtins.c
23 __m128 test_mm_blend_ps(__m128 V1, __m128 V2) { in test_mm_blend_ps()
41 __m128 test_mm_blendv_ps(__m128 V1, __m128 V2, __m128 V3) { in test_mm_blendv_ps()
53 __m128 test_mm_ceil_ps(__m128 x) { in test_mm_ceil_ps()
65 __m128 test_mm_ceil_ss(__m128 x, __m128 y) { in test_mm_ceil_ss()
168 __m128 test_mm_dp_ps(__m128 x, __m128 y) { in test_mm_dp_ps()
193 int test_mm_extract_ps(__m128 x) { in test_mm_extract_ps()
205 __m128 test_mm_floor_ps(__m128 x) { in test_mm_floor_ps()
217 __m128 test_mm_floor_ss(__m128 x, __m128 y) { in test_mm_floor_ss()
241 __m128 test_mm_insert_ps(__m128 x, __m128 y) { in test_mm_insert_ps()
339 __m128 test_mm_round_ps(__m128 x) { in test_mm_round_ps()
[all …]
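SSE4.1 blends select lanes with a compile-time immediate, and floor/ceil/round map to roundps with a fixed rounding-mode immediate. A hedged sketch, assuming -msse4.1 (mix_low_pair and floor4 are illustrative names):

#include <smmintrin.h>

__m128 mix_low_pair(__m128 a, __m128 b) {
  return _mm_blend_ps(a, b, 0x3);  /* bits 0-1 set: lanes 0,1 from b; lanes 2,3 from a */
}

__m128 floor4(__m128 x) {
  return _mm_floor_ps(x);          /* round each lane toward negative infinity */
}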
avx-cmp-builtins.c
19 __m128d test_cmp_ps(__m128 a, __m128 b) { in test_cmp_ps()
43 __m128d test_cmp_ss(__m128 a, __m128 b) { in test_cmp_ss()
49 __m128 test_cmpgt_ss(__m128 a, __m128 b) { in test_cmpgt_ss()
55 __m128 test_cmpge_ss(__m128 a, __m128 b) { in test_cmpge_ss()
61 __m128 test_cmpngt_ss(__m128 a, __m128 b) { in test_cmpngt_ss()
67 __m128 test_cmpnge_ss(__m128 a, __m128 b) { in test_cmpnge_ss()
sse.c
10 __m128 test_mm_slli_si128(__m128 a) { in test_mm_slli_si128()
16 __m128 test_mm_slli_si128_0(__m128 a) { in test_mm_slli_si128_0()
22 __m128 test_mm_slli_si128_16(__m128 a) { in test_mm_slli_si128_16()
28 __m128 test_mm_srli_si128(__m128 a) { in test_mm_srli_si128()
34 __m128 test_mm_srli_si128_0(__m128 a) { in test_mm_srli_si128_0()
40 __m128 test_mm_srli_si128_16(__m128 a) { in test_mm_srli_si128_16()
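The _0/_16 variants pin down the byte-shift edge cases: _mm_slli_si128 and _mm_srli_si128 shift the whole 128-bit register by an immediate byte count, and counts of 16 or more yield all zeros. A hedged sketch using the integer type the intrinsic is declared for:

#include <emmintrin.h>

__m128i shl_4_bytes(__m128i a) {
  return _mm_slli_si128(a, 4);   /* shift left 4 bytes; imm >= 16 gives a zero vector */
}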
sse3-builtins.c
16 __m128 test_mm_addsub_ps(__m128 A, __m128 B) { in test_mm_addsub_ps()
28 __m128 test_mm_hadd_ps(__m128 A, __m128 B) { in test_mm_hadd_ps()
40 __m128 test_mm_hsub_ps(__m128 A, __m128 B) { in test_mm_hsub_ps()
66 __m128 test_mm_movehdup_ps(__m128 A) { in test_mm_movehdup_ps()
72 __m128 test_mm_moveldup_ps(__m128 A) { in test_mm_moveldup_ps()
avx512f-builtins.c
1066 __m128 test_mm512_extractf32x4_ps(__m512 a) in test_mm512_extractf32x4_ps()
1073 __m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,__m512d __A){ in test_mm512_mask_extractf32x4_ps()
1079 __m128 test_mm512_maskz_extractf32x4_ps( __mmask8 __U,__m512d __A){ in test_mm512_maskz_extractf32x4_ps()
1758 __m128 test_mm_add_round_ss(__m128 __A, __m128 __B) { in test_mm_add_round_ss()
1763 __m128 test_mm_mask_add_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in test_mm_mask_add_round_ss()
1768 __m128 test_mm_maskz_add_round_ss(__mmask8 __U, __m128 __A, __m128 __B) { in test_mm_maskz_add_round_ss()
1773 __m128 test_mm_mask_add_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in test_mm_mask_add_ss()
1778 __m128 test_mm_maskz_add_ss(__mmask8 __U, __m128 __A, __m128 __B) { in test_mm_maskz_add_ss()
1858 __m128 test_mm_sub_round_ss(__m128 __A, __m128 __B) { in test_mm_sub_round_ss()
1863 __m128 test_mm_mask_sub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in test_mm_mask_sub_round_ss()
[all …]
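The _round_ variants of the AVX-512F scalar ops take an explicit rounding-mode immediate instead of reading MXCSR. A hedged sketch, assuming -mavx512f (add_rz is an illustrative name):

#include <immintrin.h>

__m128 add_rz(__m128 a, __m128 b) {
  /* Round toward zero, suppress exceptions, independent of MXCSR. */
  return _mm_add_round_ss(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}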
target-builtin-noerror.c
34 __m128 __attribute__((target("fma"))) fma_1(__m128 a, __m128 b, __m128 c) { in fma_1()
38 __m128 __attribute__((target("fma4"))) fma_2(__m128 a, __m128 b, __m128 c) { in fma_2()
42 __m128 __attribute__((target("fma,fma4"))) fma_3(__m128 a, __m128 b, __m128 c) { in fma_3()
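These declarations exercise the target attribute, which enables an ISA extension for a single function body without changing the translation unit's -m flags. A hedged sketch of the same pattern (fma_here is an illustrative name):

#include <immintrin.h>

__attribute__((target("fma")))
__m128 fma_here(__m128 a, __m128 b, __m128 c) {
  return _mm_fmadd_ps(a, b, c);  /* legal here even if the TU is built without -mfma */
}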
/external/clang/lib/Headers/
xmmintrin.h
31 typedef float __m128 __attribute__((__vector_size__(16))); typedef
60 static __inline__ __m128 __DEFAULT_FN_ATTRS
61 _mm_add_ss(__m128 __a, __m128 __b) in _mm_add_ss()
80 static __inline__ __m128 __DEFAULT_FN_ATTRS
81 _mm_add_ps(__m128 __a, __m128 __b) in _mm_add_ps()
83 return (__m128)((__v4sf)__a + (__v4sf)__b); in _mm_add_ps()
102 static __inline__ __m128 __DEFAULT_FN_ATTRS
103 _mm_sub_ss(__m128 __a, __m128 __b) in _mm_sub_ss()
123 static __inline__ __m128 __DEFAULT_FN_ATTRS
124 _mm_sub_ps(__m128 __a, __m128 __b) in _mm_sub_ps()
[all …]
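The key detail in xmmintrin.h is that __m128 is an ordinary Clang/GCC vector-extension type, so packed arithmetic like _mm_add_ps compiles to plain vector IR ((__v4sf)__a + (__v4sf)__b) rather than a target builtin. A hedged re-creation of the pattern outside the header (my_v4sf and my_add_ps are illustrative names):

typedef float my_v4sf __attribute__((__vector_size__(16)));

static inline my_v4sf my_add_ps(my_v4sf a, my_v4sf b) {
  return a + b;  /* element-wise vector add; lowers to a single addps */
}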
fma4intrin.h
36 static __inline__ __m128 __DEFAULT_FN_ATTRS
37 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_macc_ps()
39 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ps()
48 static __inline__ __m128 __DEFAULT_FN_ATTRS
49 _mm_macc_ss(__m128 __A, __m128 __B, __m128 __C) in _mm_macc_ss()
51 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_macc_ss()
60 static __inline__ __m128 __DEFAULT_FN_ATTRS
61 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_msub_ps()
63 return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_msub_ps()
72 static __inline__ __m128 __DEFAULT_FN_ATTRS
[all …]
fmaintrin.h
34 static __inline__ __m128 __DEFAULT_FN_ATTRS
35 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_fmadd_ps()
37 return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ps()
46 static __inline__ __m128 __DEFAULT_FN_ATTRS
47 _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) in _mm_fmadd_ss()
49 return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmadd_ss()
58 static __inline__ __m128 __DEFAULT_FN_ATTRS
59 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) in _mm_fmsub_ps()
61 return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); in _mm_fmsub_ps()
70 static __inline__ __m128 __DEFAULT_FN_ATTRS
[all …]
avx512vldqintrin.h
129 static __inline__ __m128 __DEFAULT_FN_ATTRS
130 _mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in _mm_mask_andnot_ps()
131 return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, in _mm_mask_andnot_ps()
137 static __inline__ __m128 __DEFAULT_FN_ATTRS
138 _mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) { in _mm_maskz_andnot_ps()
139 return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, in _mm_maskz_andnot_ps()
197 static __inline__ __m128 __DEFAULT_FN_ATTRS
198 _mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { in _mm_mask_and_ps()
199 return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, in _mm_mask_and_ps()
205 static __inline__ __m128 __DEFAULT_FN_ATTRS
[all …]
pmmintrin.h
64 static __inline__ __m128 __DEFAULT_FN_ATTRS
65 _mm_addsub_ps(__m128 __a, __m128 __b) in _mm_addsub_ps()
87 static __inline__ __m128 __DEFAULT_FN_ATTRS
88 _mm_hadd_ps(__m128 __a, __m128 __b) in _mm_hadd_ps()
110 static __inline__ __m128 __DEFAULT_FN_ATTRS
111 _mm_hsub_ps(__m128 __a, __m128 __b) in _mm_hsub_ps()
132 static __inline__ __m128 __DEFAULT_FN_ATTRS
133 _mm_movehdup_ps(__m128 __a) in _mm_movehdup_ps()
154 static __inline__ __m128 __DEFAULT_FN_ATTRS
155 _mm_moveldup_ps(__m128 __a) in _mm_moveldup_ps()
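SSE3's horizontal ops combine adjacent lane pairs: _mm_hadd_ps(a, b) yields {a0+a1, a2+a3, b0+b1, b2+b3}. A hedged sketch that uses two of them to reduce a register to its lane sum, assuming -msse3 (hsum_ps is an illustrative name):

#include <pmmintrin.h>

static inline float hsum_ps(__m128 v) {
  __m128 h = _mm_hadd_ps(v, v);  /* {v0+v1, v2+v3, v0+v1, v2+v3} */
  h = _mm_hadd_ps(h, h);         /* every lane = v0+v1+v2+v3 */
  return _mm_cvtss_f32(h);
}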
/external/vulkan-validation-layers/libs/glm/detail/
intrinsic_common.inl
52 static const __m128 GLM_VAR_USED zero = _mm_setzero_ps();
53 static const __m128 GLM_VAR_USED one = _mm_set_ps1(1.0f);
54 static const __m128 GLM_VAR_USED minus_one = _mm_set_ps1(-1.0f);
55 static const __m128 GLM_VAR_USED two = _mm_set_ps1(2.0f);
56 static const __m128 GLM_VAR_USED three = _mm_set_ps1(3.0f);
57 static const __m128 GLM_VAR_USED pi = _mm_set_ps1(3.1415926535897932384626433832795f);
58 static const __m128 GLM_VAR_USED hundred_eighty = _mm_set_ps1(180.f);
59 …static const __m128 GLM_VAR_USED pi_over_hundred_eighty = _mm_set_ps1(0.01745329251994329576923690…
60 …static const __m128 GLM_VAR_USED hundred_eighty_over_pi = _mm_set_ps1(57.2957795130823208767981548…
63 static const __m128 GLM_VAR_USED abs4Mask = _mm_set_ps1(absMask.f);
[all …]
intrinsic_matrix.inl
32 static const __m128 GLM_VAR_USED _m128_rad_ps = _mm_set_ps1(3.141592653589793238462643383279f / 180…
33 static const __m128 GLM_VAR_USED _m128_deg_ps = _mm_set_ps1(180.f / 3.14159265358979323846264338327…
38 __m128 const in1[4],
39 __m128 const in2[4],
40 __m128 out[4]
49 GLM_FUNC_QUALIFIER void sse_add_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
59 GLM_FUNC_QUALIFIER void sse_sub_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4])
69 GLM_FUNC_QUALIFIER __m128 sse_mul_ps(__m128 const m[4], __m128 v)
71 __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
72 __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
[all …]
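sse_mul_ps implements column-major matrix-times-vector by broadcasting each component of v with _mm_shuffle_ps and accumulating it against the matrix columns. A hedged re-sketch of that pattern (mat4_mul_vec4 is an illustrative name):

#include <xmmintrin.h>

/* Column-major 4x4 times vec4: r = m[0]*v.x + m[1]*v.y + m[2]*v.z + m[3]*v.w */
static inline __m128 mat4_mul_vec4(const __m128 m[4], __m128 v) {
  __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); /* broadcast x */
  __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); /* broadcast y */
  __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); /* broadcast z */
  __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)); /* broadcast w */
  __m128 r = _mm_mul_ps(m[0], v0);
  r = _mm_add_ps(r, _mm_mul_ps(m[1], v1));
  r = _mm_add_ps(r, _mm_mul_ps(m[2], v2));
  r = _mm_add_ps(r, _mm_mul_ps(m[3], v3));
  return r;
}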
intrinsic_common.hpp
41 __m128 sse_abs_ps(__m128 x);
43 __m128 sse_sgn_ps(__m128 x);
46 __m128 sse_flr_ps(__m128 v);
49 __m128 sse_trc_ps(__m128 v);
52 __m128 sse_nd_ps(__m128 v);
55 __m128 sse_rde_ps(__m128 v);
57 __m128 sse_rnd_ps(__m128 x);
59 __m128 sse_ceil_ps(__m128 v);
61 __m128 sse_frc_ps(__m128 x);
63 __m128 sse_mod_ps(__m128 x, __m128 y);
[all …]
intrinsic_geometric.inl
33 GLM_FUNC_QUALIFIER __m128 sse_len_ps(__m128 x)
35 __m128 dot0 = sse_dot_ps(x, x);
36 __m128 sqt0 = _mm_sqrt_ps(dot0);
41 GLM_FUNC_QUALIFIER __m128 sse_dst_ps(__m128 p0, __m128 p1)
43 __m128 sub0 = _mm_sub_ps(p0, p1);
44 __m128 len0 = sse_len_ps(sub0);
49 GLM_FUNC_QUALIFIER __m128 sse_dot_ps(__m128 v1, __m128 v2)
54 __m128 mul0 = _mm_mul_ps(v1, v2);
55 __m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
56 __m128 add0 = _mm_add_ps(mul0, swp0);
[all …]
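sse_dot_ps reduces the element-wise product with a pair of shuffle/add folds, leaving the dot product replicated in all four lanes (which is what sse_len_ps feeds into _mm_sqrt_ps). A hedged sketch of the same swizzle-reduce, completing the second fold the truncated listing cuts off (dot4_ps is an illustrative name):

#include <xmmintrin.h>

static inline __m128 dot4_ps(__m128 a, __m128 b) {
  __m128 mul = _mm_mul_ps(a, b);                                  /* {m0, m1, m2, m3} */
  __m128 swp = _mm_shuffle_ps(mul, mul, _MM_SHUFFLE(2, 3, 0, 1)); /* {m1, m0, m3, m2} */
  __m128 add = _mm_add_ps(mul, swp);                              /* {m0+m1, m0+m1, m2+m3, m2+m3} */
  __m128 swp2 = _mm_shuffle_ps(add, add, _MM_SHUFFLE(0, 1, 2, 3));
  return _mm_add_ps(add, swp2);                                   /* dot in every lane */
}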
intrinsic_geometric.hpp
44 __m128 sse_len_ps(__m128 x);
47 __m128 sse_dst_ps(__m128 p0, __m128 p1);
50 __m128 sse_dot_ps(__m128 v1, __m128 v2);
53 __m128 sse_dot_ss(__m128 v1, __m128 v2);
56 __m128 sse_xpd_ps(__m128 v1, __m128 v2);
59 __m128 sse_nrm_ps(__m128 v);
62 __m128 sse_ffd_ps(__m128 N, __m128 I, __m128 Nref);
65 __m128 sse_rfe_ps(__m128 I, __m128 N);
68 __m128 sse_rfa_ps(__m128 I, __m128 N, __m128 eta);
intrinsic_matrix.hpp
43 void sse_add_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]);
45 void sse_sub_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]);
47 __m128 sse_mul_ps(__m128 m[4], __m128 v);
49 __m128 sse_mul_ps(__m128 v, __m128 m[4]);
51 void sse_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4]);
53 void sse_transpose_ps(__m128 const in[4], __m128 out[4]);
55 void sse_inverse_ps(__m128 const in[4], __m128 out[4]);
57 void sse_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4]);
59 __m128 sse_det_ps(__m128 const m[4]);
61 __m128 sse_slow_det_ps(__m128 const m[4]);
/external/webrtc/webrtc/modules/audio_processing/aec/
aec_rdft_sse2.c
19 const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign); in cft1st_128_SSE2()
23 __m128 a00v = _mm_loadu_ps(&a[j + 0]); in cft1st_128_SSE2()
24 __m128 a04v = _mm_loadu_ps(&a[j + 4]); in cft1st_128_SSE2()
25 __m128 a08v = _mm_loadu_ps(&a[j + 8]); in cft1st_128_SSE2()
26 __m128 a12v = _mm_loadu_ps(&a[j + 12]); in cft1st_128_SSE2()
27 __m128 a01v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(1, 0, 1, 0)); in cft1st_128_SSE2()
28 __m128 a23v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(3, 2, 3, 2)); in cft1st_128_SSE2()
29 __m128 a45v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(1, 0, 1, 0)); in cft1st_128_SSE2()
30 __m128 a67v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(3, 2, 3, 2)); in cft1st_128_SSE2()
32 const __m128 wk1rv = _mm_load_ps(&rdft_wk1r[k2]); in cft1st_128_SSE2()
[all …]
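cft1st_128_SSE2 regroups interleaved complex FFT data with two-source shuffles: _MM_SHUFFLE(1, 0, 1, 0) gathers the low two lanes of each source register, _MM_SHUFFLE(3, 2, 3, 2) the high two. A hedged sketch of just that building block (split_pairs is an illustrative name):

#include <xmmintrin.h>

static inline void split_pairs(__m128 a, __m128 b, __m128* lo, __m128* hi) {
  *lo = _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 1, 0)); /* {a0, a1, b0, b1} */
  *hi = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 3, 2)); /* {a2, a3, b2, b3} */
}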
aec_core_sse2.c
51 const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]); in FilterFarSSE2()
52 const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]); in FilterFarSSE2()
53 const __m128 h_fft_buf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]); in FilterFarSSE2()
54 const __m128 h_fft_buf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]); in FilterFarSSE2()
55 const __m128 y_fft_re = _mm_loadu_ps(&y_fft[0][j]); in FilterFarSSE2()
56 const __m128 y_fft_im = _mm_loadu_ps(&y_fft[1][j]); in FilterFarSSE2()
57 const __m128 a = _mm_mul_ps(x_fft_buf_re, h_fft_buf_re); in FilterFarSSE2()
58 const __m128 b = _mm_mul_ps(x_fft_buf_im, h_fft_buf_im); in FilterFarSSE2()
59 const __m128 c = _mm_mul_ps(x_fft_buf_re, h_fft_buf_im); in FilterFarSSE2()
60 const __m128 d = _mm_mul_ps(x_fft_buf_im, h_fft_buf_re); in FilterFarSSE2()
[all …]
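FilterFarSSE2 operates on split-complex spectra (real and imaginary parts in separate arrays), so one complex multiply-accumulate is four packed multiplies plus an add and a subtract, four frequency bins per iteration. A hedged re-sketch of that inner pattern (cmac_split and its parameters are illustrative, not WebRTC's API):

#include <emmintrin.h>

/* y += x * h over n complex bins, n a multiple of 4:
   re(y) += re(x)*re(h) - im(x)*im(h);  im(y) += re(x)*im(h) + im(x)*re(h) */
static void cmac_split(float* y_re, float* y_im,
                       const float* x_re, const float* x_im,
                       const float* h_re, const float* h_im, int n) {
  for (int j = 0; j < n; j += 4) {
    const __m128 xr = _mm_loadu_ps(x_re + j);
    const __m128 xi = _mm_loadu_ps(x_im + j);
    const __m128 hr = _mm_loadu_ps(h_re + j);
    const __m128 hi = _mm_loadu_ps(h_im + j);
    const __m128 re = _mm_sub_ps(_mm_mul_ps(xr, hr), _mm_mul_ps(xi, hi));
    const __m128 im = _mm_add_ps(_mm_mul_ps(xr, hi), _mm_mul_ps(xi, hr));
    _mm_storeu_ps(y_re + j, _mm_add_ps(_mm_loadu_ps(y_re + j), re));
    _mm_storeu_ps(y_im + j, _mm_add_ps(_mm_loadu_ps(y_im + j), im));
  }
}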
/external/vulkan-validation-layers/libs/glm/gtx/
simd_quat.inl
18 void print(__m128 v)
23 printf("__m128: %f %f %f %f\n", result[0], result[1], result[2], result[3]);
42 GLM_FUNC_QUALIFIER fquatSIMD::fquatSIMD(__m128 const & Data) :
123 __m128 mul0 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(0, 1, 2, 3)));
124 __m128 mul1 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(1, 0, 3, 2)));
125 __m128 mul2 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(2, 3, 0, 1)));
126 __m128 mul3 = _mm_mul_ps(q1.Data, q2.Data);
129 __m128 add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff);
130 __m128 add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff);
131 __m128 add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff);
[all …]
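The quaternion product above leans on SSE4.1's _mm_dp_ps: the high nibble of the immediate selects which lanes enter the dot product and the low nibble selects which result lanes receive the sum, while the _mm_set_ps sign vectors flip individual terms before the reduction. A hedged sketch of just the dp_ps building block, assuming -msse4.1 (dot4_all and dot3_all are illustrative names):

#include <smmintrin.h>

static inline __m128 dot4_all(__m128 a, __m128 b) {
  return _mm_dp_ps(a, b, 0xff);  /* multiply all 4 lanes, broadcast sum to all 4 */
}

static inline __m128 dot3_all(__m128 a, __m128 b) {
  return _mm_dp_ps(a, b, 0x7f);  /* lanes 0-2 only (3-component dot), broadcast */
}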
