// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>
// Heavily based on Gael's SSE version.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#if EIGEN_ARCH_ARM64
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#else
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
#endif

typedef float32x2_t Packet2f;
typedef float32x4_t Packet4f;
typedef int32x4_t   Packet4i;
typedef int32x2_t   Packet2i;
typedef uint32x4_t  Packet4ui;

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

// Build a float packet from an integer bit pattern: vdupq_n_u32 broadcasts the
// 32-bit pattern, which is then reinterpreted lane-wise as float.
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(vdupq_n_u32(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
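
// Illustrative usage of the constant-declaration helpers above (hypothetical
// names; these constants are not defined by this file):
//   _EIGEN_DECLARE_CONST_Packet4f(ONE, 1.0f);                  // Packet4f p4f_ONE = {1,1,1,1}
//   _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(MASK, 0x80000000u); // p4f_MASK holds the sign-bit pattern
//   _EIGEN_DECLARE_CONST_Packet4i(TWO, 2);                     // Packet4i p4i_TWO = {2,2,2,2}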

#if EIGEN_ARCH_ARM64
// __builtin_prefetch tends to do nothing on ARM64 compilers because the
// prefetch instructions there are too detailed for __builtin_prefetch to map
// meaningfully to them.
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__("prfm pldl1keep, [%[addr]]\n" ::[addr] "r"(ADDR) : );
#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif EIGEN_ARCH_ARM32
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
#endif

template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half; // Packet2f intrinsics not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0, // Packet2f intrinsics not implemented yet

    HasDiv  = 1,
    // FIXME check the Has*
    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 1,
    HasSqrt = 0
  };
};
template<> struct packet_traits<int32_t> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half; // Packet2i intrinsics not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0 // Packet2i intrinsics not implemented yet
    // FIXME check the Has*
  };
};

#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
// workaround gcc 4.2, 4.3 and 4.4 compilation issue
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to, from); }
EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to, from); }
#endif

template<> struct unpacket_traits<Packet4f> { typedef float   type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet4i> { typedef int32_t type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };

template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&   from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
  const float f[] = {0, 1, 2, 3};
  Packet4f countdown = vld1q_f32(f);
  return vaddq_f32(pset1<Packet4f>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
{
  const int32_t i[] = {0, 1, 2, 3};
  Packet4i countdown = vld1q_s32(i);
  return vaddq_s32(pset1<Packet4i>(a), countdown);
}
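
// Illustrative semantics: plset broadcasts a and adds the ramp {0,1,2,3},
// so e.g. plset<Packet4f>(10.f) yields {10.f, 11.f, 12.f, 13.f}.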

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
#if EIGEN_ARCH_ARM64
  return vdivq_f32(a,b);
#else
  Packet4f inv, restep, div;

  // NEON (pre-ARMv8) does not offer a divide instruction, so we compute a
  // reciprocal approximation instead. Unlike other SIMD engines (AltiVec/SSE),
  // NEON offers both a reciprocal estimate AND a reciprocal step, which saves
  // a few instructions: vrecpeq_f32() returns an estimate of 1/b, which we
  // refine with one Newton-Raphson iteration using vrecpsq_f32().
  inv = vrecpeq_f32(b);

  // vrecpsq_f32(b, inv) returns the correction factor (2 - b*inv); multiplying
  // inv by it performs the Newton-Raphson step inv' = inv*(2 - b*inv), giving
  // a better approximation of 1/b.
  restep = vrecpsq_f32(b, inv);
  inv = vmulq_f32(restep, inv);

  // Finally, multiply a by 1/b to get the wanted result of the division.
  div = vmulq_f32(a, inv);

  return div;
#endif
}

template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}

// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when it's not available,
// then implements a slow software scalar fallback calling fmaf()!
// Filed LLVM bug:
//   https://llvm.org/bugs/show_bug.cgi?id=27216
#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
// See bug 936.
// FMA is available on VFPv4 i.e. when compiling with -mfpu=neon-vfpv4.
// FMA is a true fused multiply-add i.e. only 1 rounding at the end, no intermediate rounding.
// MLA is not fused i.e. does 2 roundings.
// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
// MLA: 10 GFlop/s ; FMA: 12 GFlops/s.
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
  // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,
  // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
  // -march=armv7-a, that is a very common case.
  // See e.g. this thread:
  //   http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html
  // Filed LLVM bug:
  //   https://llvm.org/bugs/show_bug.cgi?id=27219
  Packet4f r = c;
  asm volatile(
    "vmla.f32 %q[r], %q[a], %q[b]"
    : [r] "+w" (r)
    : [a] "w" (a),
      [b] "w" (b)
    : );
  return r;
#else
  return vmlaq_f32(c,a,b);
#endif
}
#endif
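
// Illustrative example of the rounding difference (hypothetical float values):
// with a = b = 1 + 2^-12 and c = -(1 + 2^-11), the exact a*b + c is 2^-24.
// FMA rounds once and returns 2^-24, while MLA first rounds a*b
// (1 + 2^-11 + 2^-24 rounds to 1 + 2^-11 in single precision) and then adds c,
// returning 0.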

// No FMA instruction for int, so use MLA unconditionally.
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }

// Logical operations are not supported for float, so we have to go through
// reinterpret casts using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*   from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  float32x2_t lo, hi;
  lo = vld1_dup_f32(from);
  hi = vld1_dup_f32(from+1);
  return vcombine_f32(lo, hi);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
{
  int32x2_t lo, hi;
  lo = vld1_dup_s32(from);
  hi = vld1_dup_s32(from+1);
  return vcombine_s32(lo, hi);
}
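
// Illustrative semantics: ploaddup reads two scalars and duplicates each into
// adjacent lanes, so for from = {x, y, ...} it returns {x, x, y, y}.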

template<> EIGEN_STRONG_INLINE void pstore<float>  (float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>  (float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  Packet4f res = pset1<Packet4f>(0.f);
  res = vsetq_lane_f32(from[0*stride], res, 0);
  res = vsetq_lane_f32(from[1*stride], res, 1);
  res = vsetq_lane_f32(from[2*stride], res, 2);
  res = vsetq_lane_f32(from[3*stride], res, 3);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
{
  Packet4i res = pset1<Packet4i>(0);
  res = vsetq_lane_s32(from[0*stride], res, 0);
  res = vsetq_lane_s32(from[1*stride], res, 1);
  res = vsetq_lane_s32(from[2*stride], res, 2);
  res = vsetq_lane_s32(from[3*stride], res, 3);
  return res;
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = vgetq_lane_f32(from, 0);
  to[stride*1] = vgetq_lane_f32(from, 1);
  to[stride*2] = vgetq_lane_f32(from, 2);
  to[stride*3] = vgetq_lane_f32(from, 3);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
{
  to[stride*0] = vgetq_lane_s32(from, 0);
  to[stride*1] = vgetq_lane_s32(from, 1);
  to[stride*2] = vgetq_lane_s32(from, 2);
  to[stride*3] = vgetq_lane_s32(from, 3);
}

template<> EIGEN_STRONG_INLINE void prefetch<float>  (const float*   addr) { EIGEN_ARM_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }

// FIXME only store the first 2 elements?
template<> EIGEN_STRONG_INLINE float   pfirst<Packet4f>(const Packet4f& a) { float   EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
  float32x2_t a_lo, a_hi;
  Packet4f a_r64;

  a_r64 = vrev64q_f32(a);
  a_lo = vget_low_f32(a_r64);
  a_hi = vget_high_f32(a_r64);
  return vcombine_f32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
  int32x2_t a_lo, a_hi;
  Packet4i a_r64;

  a_r64 = vrev64q_s32(a);
  a_lo = vget_low_s32(a_r64);
  a_hi = vget_high_s32(a_r64);
  return vcombine_s32(a_hi, a_lo);
}

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  sum = vpadd_f32(a_lo, a_hi);
  sum = vpadd_f32(sum, sum);
  return vget_lane_f32(sum, 0);
}
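
// Illustrative trace of the pairwise reduction above: for a = {1,2,3,4},
// vpadd_f32(a_lo, a_hi) = {1+2, 3+4} = {3,7}, then vpadd_f32(sum, sum) =
// {3+7, 3+7}, so lane 0 holds the total 10.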

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  float32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4f sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two rounds of interleaving to obtain the transposed vectors.
  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);

  // Add the resulting vectors.
  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
  sum = vaddq_f32(sum1, sum2);

  return sum;
}

template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  sum = vpadd_s32(a_lo, a_hi);
  sum = vpadd_s32(sum, sum);
  return vget_lane_s32(sum, 0);
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  int32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4i sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two rounds of interleaving to obtain the transposed vectors.
  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);

  // Add the resulting vectors.
  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
  sum = vaddq_s32(sum1, sum2);

  return sum;
}

// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_f32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_f32(prod, vrev64_f32(prod));

  return vget_lane_f32(prod, 0);
}
template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_s32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_s32(prod, vrev64_s32(prod));

  return vget_lane_s32(prod, 0);
}
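
// Illustrative trace of predux_mul: for a = {1,2,3,4}, prod = {1*3, 2*4} =
// {3,8}; multiplying by its swap {8,3} gives {24,24}, and lane 0 holds
// 1*2*3*4 = 24.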

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, min;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  min = vpmin_f32(a_lo, a_hi);
  min = vpmin_f32(min, min);

  return vget_lane_f32(min, 0);
}

template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, min;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  min = vpmin_s32(a_lo, a_hi);
  min = vpmin_s32(min, min);

  return vget_lane_s32(min, 0);
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, max;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  max = vpmax_f32(a_lo, a_hi);
  max = vpmax_f32(max, max);

  return vget_lane_f32(max, 0);
}

template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, max;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  max = vpmax_s32(a_lo, a_hi);
  max = vpmax_s32(max, max);

  return vget_lane_s32(max, 0);
}

// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// spurious compilation errors, see bug 347 and this LLVM bug:
//   http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
  EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
  {\
    if (Offset!=0)\
      first = Command(first, second, Offset);\
  }\
};\

PALIGN_NEON(0,Packet4f,vextq_f32)
PALIGN_NEON(1,Packet4f,vextq_f32)
PALIGN_NEON(2,Packet4f,vextq_f32)
PALIGN_NEON(3,Packet4f,vextq_f32)
PALIGN_NEON(0,Packet4i,vextq_s32)
PALIGN_NEON(1,Packet4i,vextq_s32)
PALIGN_NEON(2,Packet4i,vextq_s32)
PALIGN_NEON(3,Packet4i,vextq_s32)

#undef PALIGN_NEON

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]), vget_low_f32(tmp2.val[0]));
  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]), vget_low_f32(tmp2.val[1]));
  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0]));
  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1]));
  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
}
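
// Illustrative traces (hypothetical lane values):
// - palign_impl<2,Packet4f>::run(first, second), with first = {a0,a1,a2,a3}
//   and second = {b0,b1,b2,b3}, leaves first = {a2,a3,b0,b1}: vextq extracts
//   a window starting Offset lanes into the concatenation of its operands.
// - ptranspose on rows {0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15} leaves
//   kernel.packet = {0,4,8,12},{1,5,9,13},{2,6,10,14},{3,7,11,15}: the zip
//   interleaves plus low/high recombination implement a 4x4 transpose.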

//---------- double ----------

// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double.
// Confirmed at least with __apple_build_version__ = 6000054.
#ifdef __apple_build_version__
// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed.
// https://gist.github.com/yamaya/2924292 suggests that the first 3 digits are only updated with
// major toolchain updates.
#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
#else
#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
#endif

#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG

// Bug 907: workaround missing declarations of the following two functions in the ADK.
// Defining these functions as templates ensures that if these intrinsics are
// already defined in arm_neon.h, then our workaround doesn't cause a conflict
// and has lower priority in overload resolution (a matching non-template
// function is always preferred over a template).
template <typename T>
uint64x2_t vreinterpretq_u64_f64(T a)
{
  return (uint64x2_t) a;
}

template <typename T>
float64x2_t vreinterpretq_f64_u64(T a)
{
  return (float64x2_t) a;
}
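
// Illustrative note on the workaround above: if arm_neon.h already declares,
// e.g., float64x2_t vreinterpretq_f64_u64(uint64x2_t), then a call with a
// uint64x2_t argument picks that non-template overload; the template version
// is only selected when no intrinsic declaration exists.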

typedef float64x2_t Packet2d;
typedef float64x1_t Packet1d;

template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 2,
    HasHalfPacket = 0,

    HasDiv  = 1,
    // FIXME check the Has*
    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 0,
    HasSqrt = 0
  };
};

template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };

template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
{
  const double countdown_raw[] = {0.0, 1.0};
  const Packet2d countdown = vld1q_f64(countdown_raw);
  return vaddq_f64(pset1<Packet2d>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }

template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); }

#ifdef __ARM_FEATURE_FMA
// See bug 936. See above comment about FMA for float.
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }

// Logical operations are not supported for double, so we have to go through
// reinterpret casts using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}
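
// Illustrative semantics: vbicq_u64(x, y) computes x & ~y bit-wise, so
// pandnot(a, b) keeps exactly those bits of a that are clear in b; for
// example, using a sign-bit mask as b would clear each lane's sign bit
// (pabs below nevertheless uses vabsq_f64 directly).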

template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{
  return vld1q_dup_f64(from);
}
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  Packet2d res = pset1<Packet2d>(0.0);
  res = vsetq_lane_f64(from[0*stride], res, 0);
  res = vsetq_lane_f64(from[1*stride], res, 1);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = vgetq_lane_f64(from, 0);
  to[stride*1] = vgetq_lane_f64(from, 1);
}
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }

// FIXME only store the first 2 elements?
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }

template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }

template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }

#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround ICE, see bug 907
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  float64x2_t trn1, trn2;

  // NEON zip performs interleaving of the supplied vectors.
  // Here a single pair of zips yields the transposed vectors.
  trn1 = vzip1q_f64(vecs[0], vecs[1]);
  trn2 = vzip2q_f64(vecs[0], vecs[1]);

  // Add the resulting vectors.
  return vaddq_f64(trn1, trn2);
}
// Other reduction functions:
// mul
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
#endif

// min
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }

// max
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }

// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// spurious compilation errors, see bug 347 and this LLVM bug:
//   http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
  EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
  {\
    if (Offset!=0)\
      first = Command(first, second, Offset);\
  }\
};\

PALIGN_NEON(0,Packet2d,vextq_f64)
PALIGN_NEON(1,Packet2d,vextq_f64)
#undef PALIGN_NEON

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);

  kernel.packet[0] = trn1;
  kernel.packet[1] = trn2;
}
#endif // EIGEN_ARCH_ARM64

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_NEON_H