#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H
#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif
#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif
typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;
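// One 256-bit AVX register holds 8 floats (Packet8f), 8 32-bit ints (Packet8i),
// or 4 doubles (Packet4d); the packet sizes below follow from that.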
template<> struct is_arithmetic<__m256>  { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };
#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)
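// e.g. _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f) expands to
//   const Packet8f p8f_one = pset1<Packet8f>(1.0f);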
#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1, AlignedOnScalar = 1, size = 8, HasHalfPacket = 1,
    HasDiv = 1, HasSin = EIGEN_FAST_MATH, HasLog = 1, HasExp = 1, HasSqrt = 1,
    HasTanh = EIGEN_FAST_MATH, HasBlend = 1, HasRound = 1, HasFloor = 1, HasCeil = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet4d type;
  typedef Packet2d half;
  enum { Vectorizable = 1, AlignedOnScalar = 1, size = 4, HasHalfPacket = 1,
         HasDiv = 1, HasExp = 1, HasSqrt = 1, HasBlend = 1 };
};
#endif
// Approximate cycle costs of vectorized division, used by Eigen's cost model.
template<> struct scalar_div_cost<float,true>  { enum { value = 14 }; };
template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };
template<> struct unpacket_traits<Packet8f> { typedef float  type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };
template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }

template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float&  a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
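// e.g. plset<Packet8f>(1.5f) yields {1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5}
// (lane 0 listed first; note that _mm256_set_ps takes the highest lane first).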
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}
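// x86 SIMD has no packed integer division instruction, so this path only
// asserts; integer division is handled by scalar code at a higher level.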
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
  // Work around gcc<8 and clang emitting vfmadd132ps/vfmadd213ps plus extra
  // register moves: force a vfmadd231ps, which accumulates directly into c
  // (the most common use case).
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
  // Same workaround as above, for double precision.
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif
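// Semantics sketch: pmadd(a,b,c) computes a*b+c lane-wise. With FMA enabled it
// is a single instruction with one rounding; otherwise Eigen's generic pmadd
// falls back to the pmul/padd defined above.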
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_min_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float*  from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
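// pload assumes 32-byte alignment (Aligned32 in unpacket_traits above);
// ploadu accepts any address.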
// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // _mm256_insertf128_ps is very slow on Haswell, so broadcast the 128-bit half instead:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128 bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then perform a consistent permutation on the whole register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3<<2);
}
// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}
template<> EIGEN_STRONG_INLINE void pstore<float> (float*  to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>   (int*    to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float> (float*  to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>   (int*    to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
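// A typical (though not required) use of pgather is reading a column of a
// row-major matrix, with stride equal to the leading dimension; pscatter
// below is the inverse, writing lane i of `from` to to[stride*i].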
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}
#ifndef EIGEN_VECTORIZE_AVX512
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif
template<> EIGEN_STRONG_INLINE float  pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int    pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
#if 0
  // Equivalent alternative, kept for reference/benchmarking; unlikely to be
  // faster since the shuffle and permute variants have similar latency/throughput.
  __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  return _mm256_permute_pd(swap_halves,5);
#endif
}
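// preverse returns the lanes in reverse order, e.g. {a7,a6,...,a0} for Packet8f.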
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  // clear the sign bit of each float lane
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  // clear bit 63 (the sign bit) of each double lane
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
{
  __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
  __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
  __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
  __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);

  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
  __m256 sum4 = _mm256_add_ps(perm4, hsum8);

  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
  return final;
}
template<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)
{
  Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  return _mm256_blend_pd(tmp0, tmp1, 0xC);
}
template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
}
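// predux reduces a packet to a scalar sum: it adds the two 128-bit halves,
// then defers to the SSE predux for the final in-lane horizontal add.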
template<> EIGEN_STRONG_INLINE Packet4f predux_downto4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}
template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
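// All the reductions above share one pattern: first combine the two 128-bit
// lanes, then shrink within a lane via shuffles until one scalar remains.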
template<int Offset>
struct palign_impl<Offset,Packet8f>
{
  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_ps(first, second, 1);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0x88);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_ps(first, second, 3);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_ps(first, second, 7);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xee);
    }
    else if (Offset==4)
    {
      first = _mm256_blend_ps(first, second, 15);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
    }
    else if (Offset==5)
    {
      first = _mm256_blend_ps(first, second, 31);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0x88);
    }
    else if (Offset==6)
    {
      first = _mm256_blend_ps(first, second, 63);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xcc);
    }
    else if (Offset==7)
    {
      first = _mm256_blend_ps(first, second, 127);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xee);
    }
  }
};
template<int Offset>
struct palign_impl<Offset,Packet4d>
{
  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_pd(first, second, 1);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 0xA);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_pd(first, second, 3);
      first = _mm256_permute2f128_pd(first, first, 1);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_pd(first, second, 7);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 5);
    }
  }
};
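// The palign_impl specializations above left-shift the concatenation of
// (first, second) by Offset lanes, e.g. for Offset==1 on Packet4d the result
// is {first[1], first[2], first[3], second[0]}.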
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}
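// ptranspose transposes a block of packets in registers; for the square cases
// (8x8 floats, 4x4 doubles), kernel.packet[i] afterwards holds lane i of each
// of the original packets.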
template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
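// e.g. with ifPacket.select = {1,0,0,1}, pblend returns
// {thenPacket[0], elsePacket[1], elsePacket[2], thenPacket[3]}.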
template<> EIGEN_STRONG_INLINE Packet8f pinsertfirst(const Packet8f& a, float b)
{
  return _mm256_blend_ps(a,pset1<Packet8f>(b),1);
}
template<> EIGEN_STRONG_INLINE Packet4d pinsertfirst(const Packet4d& a, double b)
{
  return _mm256_blend_pd(a,pset1<Packet4d>(b),1);
}

template<> EIGEN_STRONG_INLINE Packet8f pinsertlast(const Packet8f& a, float b)
{
  return _mm256_blend_ps(a,pset1<Packet8f>(b),(1<<7));
}
template<> EIGEN_STRONG_INLINE Packet4d pinsertlast(const Packet4d& a, double b)
{
  return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));
}
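// pinsertfirst/pinsertlast above replace the first/last lane of a with b via
// a single-lane blend, leaving all other lanes untouched.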
#endif // EIGEN_PACKET_MATH_AVX_H