#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;

template<> struct is_arithmetic<__m256>  { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };
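// Convenience macros for declaring packet-wide constants, e.g.
//   _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
// expands to `const Packet8f p8f_one = pset1<Packet8f>(1.0f);`.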
#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)

template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 1,

    HasDiv   = 1,
    HasSin   = EIGEN_FAST_MATH,
    HasCos   = 0,
    HasLog   = 1,
    HasExp   = 1,
    HasSqrt  = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil  = 1
  };
};

template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet4d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 1,

    HasDiv   = 1,
    HasExp   = 1,
    HasSqrt  = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil  = 1
  };
};
template<> struct unpacket_traits<Packet8f> { typedef float  type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };
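// unpacket_traits maps a packet type back to its scalar type, its lane count,
// the alignment its aligned load/store requires, and the half-width packet
// Eigen falls back to for small or tail blocks.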
template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }

template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }

template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float&  a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
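// Example: plset<Packet8f>(10.f) yields {10,11,12,13,14,15,16,17}.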
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
}

template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}
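// AVX (as opposed to AVX2) offers no 256-bit integer arithmetic, and x86 has
// no vector integer division instruction anyway, so pdiv<Packet8i> can only assert.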
#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
  // gcc and clang tend to pick vfmadd132ps/vfmadd213ps plus extra register
  // moves here; force a vfmadd231ps that accumulates directly into c, which
  // is the most common use case.
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
  // See above: force a vfmadd231pd accumulating into c.
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_min_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_max_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
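// _MM_FROUND_CUR_DIRECTION rounds according to the current MXCSR rounding
// mode, which is round-to-nearest-even unless the program has changed it.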
template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }
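// Note the operand order: _mm256_andnot_ps(a,b) computes (~a) & b bitwise.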
template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float*  from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
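// pload requires `from` to be 32-byte aligned (see unpacket_traits<...>::alignment);
// ploadu accepts any address.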
// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // _mm256_insertf128_ps is very slow on Haswell, so broadcast the 128-bit half instead,
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128 bits using a blend,
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then perform a consistent permutation on the whole register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3<<2);
}

// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}
template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
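// AVX has no hardware gather/scatter (vgatherdps only arrives with AVX2), so
// strided accesses are synthesized from scalar loads/stores and shuffles.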
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));

  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE float  pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int    pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
  // Equivalent alternative: swap the 128-bit halves first, then swap within each half:
  //   __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  //   return _mm256_permute_pd(swap_halves,5);
}
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}
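// pabs clears the IEEE sign bit: for doubles the 64-bit mask 0x7FFFFFFFFFFFFFFF
// is assembled from two 32-bit halves per lane with _mm256_setr_epi32.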
template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
{
  __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
  __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
  __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
  __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);

  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
  __m256 sum4 = _mm256_add_ps(perm4, hsum8);

  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
  return final;
}

template<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)
{
  Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  return _mm256_blend_pd(tmp0, tmp1, 0xC);
}
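// preduxp returns a packet whose i-th lane is the full horizontal sum of
// vecs[i]: hadd pairs up neighbouring lanes and permute2f128 folds the
// partial sums across the two 128-bit halves.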
template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  Packet8f tmp0 = _mm256_hadd_ps(a,_mm256_permute2f128_ps(a,a,1));
  tmp0 = _mm256_hadd_ps(tmp0,tmp0);
  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  Packet4d tmp0 = _mm256_hadd_pd(a,_mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_hadd_pd(tmp0,tmp0));
}
template<> EIGEN_STRONG_INLINE Packet4f predux4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}
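// predux4 adds the two 128-bit halves, giving a Packet4f whose lane i holds
// a[i] + a[i+4]; callers finish the reduction at SSE width.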
template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
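// palign_impl<Offset,Packet> shifts the pair (first, second) left by Offset
// lanes, leaving {first[Offset], ..., first[N-1], second[0], ..., second[Offset-1]}
// in `first`. AVX has no 256-bit byte-alignment instruction, hence the
// blend/permute sequences below, one branch per compile-time Offset.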
template<int Offset>
struct palign_impl<Offset,Packet8f>
{
  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_ps(first, second, 1);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0x88);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_ps(first, second, 3);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_ps(first, second, 7);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xee);
    }
    else if (Offset==4)
    {
      first = _mm256_blend_ps(first, second, 15);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
    }
    else if (Offset==5)
    {
      first = _mm256_blend_ps(first, second, 31);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0x88);
    }
    else if (Offset==6)
    {
      first = _mm256_blend_ps(first, second, 63);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xcc);
    }
    else if (Offset==7)
    {
      first = _mm256_blend_ps(first, second, 127);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xee);
    }
  }
};
template<int Offset>
struct palign_impl<Offset,Packet4d>
{
  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_pd(first, second, 1);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 0xA);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_pd(first, second, 3);
      first = _mm256_permute2f128_pd(first, first, 1);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_pd(first, second, 7);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 5);
    }
  }
};
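// The ptranspose kernels below transpose entirely in registers:
// unpacklo/unpackhi interleave pairs of rows, shuffles rebuild the 128-bit
// quarters, and permute2f128 exchanges 128-bit halves between packets.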
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}
template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4],
                                      ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
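// pblend: lanes where ifPacket.select[i] is non-zero take thenPacket, the
// rest take elsePacket; comparing the selector against zero builds the
// mask consumed by blendv.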
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX_H