Eigen 3.3.0
AVX/PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;

template<> struct is_arithmetic<__m256>  { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };

#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)

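// For illustration, a use of these helpers (with a hypothetical constant name):
//   _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
// expands to
//   const Packet8f p8f_one = pset1<Packet8f>(1.0f);
// i.e. a broadcast of the scalar into all 8 lanes of a __m256 register.
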
// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
// to leverage AVX512 instructions.
#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 1,

    HasDiv   = 1,
    HasSin   = EIGEN_FAST_MATH,
    HasCos   = 0,
    HasLog   = 1,
    HasExp   = 1,
    HasSqrt  = 1,
    HasRsqrt = 1,
    HasTanh  = EIGEN_FAST_MATH,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil  = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet4d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 1,

    HasDiv   = 1,
    HasExp   = 1,
    HasSqrt  = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasRound = 1,
    HasFloor = 1,
    HasCeil  = 1
  };
};
#endif
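
// Illustration: generic code queries these traits to pick a packet type, e.g.
//   typedef typename packet_traits<float>::type PacketF; // Packet8f under AVX
//   enum { N = packet_traits<float>::size };             // 8 floats per packet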

template<> struct scalar_div_cost<float,true>  { enum { value = 14 }; };
template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };

/* Proper support for integers is only provided by AVX2. In the meantime, we'll
   use SSE instructions and packets to deal with integers.
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet8i type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8
  };
};
*/

template<> struct unpacket_traits<Packet8f> { typedef float  type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
template<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };

template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }

template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }

template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float&  a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }

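// Illustration: plset produces an increasing ramp starting at `a`, e.g.
//   plset<Packet8f>(10.f) == {10,11,12,13,14,15,16,17}
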
template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
}

template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }


template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}

#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
  // clang generates a vfmadd213ps instruction plus some vmovaps on registers,
  // and gcc generates a vfmadd132ps instruction,
  // so force the compiler to emit a vfmadd231ps instruction, since the most
  // common use case is to accumulate the result of the product into c.
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
  // see above
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif
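
// Illustration: the accumulate pattern pmadd is tuned for, as in a dot-product
// style loop (sketch, assuming aligned float buffers x and y of length n, n a
// multiple of 8):
//   Packet8f acc = pset1<Packet8f>(0.f);
//   for (Index i = 0; i < n; i += 8)
//     acc = pmadd(pload<Packet8f>(x+i), pload<Packet8f>(y+i), acc); // acc += x*y
//   float sum = predux(acc); // horizontal reduction, defined below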

template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_min_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_max_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }

template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float*  from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }

// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // TODO try to find a way to avoid the need of a temporary register
//   Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
//   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
//   return _mm256_unpacklo_ps(tmp,tmp);

  // _mm256_insertf128_ps is very slow on Haswell, thus:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "in-place" permutation of the lower 128 bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return _mm256_permute_pd(tmp, 3<<2);
}
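
// Illustration: tracing ploaddup<Packet8f> on from = {a0,a1,a2,a3}:
//   broadcast_ps   -> {a0,a1,a2,a3, a0,a1,a2,a3}
//   blend (mask 15)-> {a0,a1,a0,a1, a0,a1,a2,a3}  (low half permuted to a0,a1,a0,a1)
//   permute 3,3,2,2-> {a0,a0,a1,a1, a2,a2,a3,a3}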

// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
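
// Illustration: gathering column j of a row-major matrix stored in a flat
// array (sketch; `m`, `cols`, and `j` are hypothetical):
//   float* m = /* row-major storage, `cols` floats per row */;
//   Packet8f col_j = pgather<float, Packet8f>(m + j, cols);
//   // reads m[j], m[cols+j], ..., m[7*cols+j] into one packet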

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
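
// Illustration: pscatter is the inverse of pgather; with stride 3 (sketch,
// `buf` is a hypothetical double array with at least 10 elements):
//   pscatter<double, Packet4d>(buf, p, 3);
// writes lane k of p to buf[3*k], i.e. to buf[0], buf[3], buf[6], buf[9].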

template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}

#ifndef EIGEN_VECTORIZE_AVX512
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
#endif

template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}


template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
  // Equivalent alternative, kept for reference:
  //   __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  //   return _mm256_permute_pd(swap_halves,5);
}

// pabs should be ok
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}
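
// Illustration: the masks above clear only the IEEE-754 sign bit. For each
// double lane, the low 32 bits are kept intact (0xFFFFFFFF) and the high 32
// bits, which hold the sign bit, are masked with 0x7FFFFFFF, so that
// pabs({-2.5, 1.0, -0.0, 3.0}) == {2.5, 1.0, 0.0, 3.0}.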

// preduxp should be ok
// FIXME: why is this ok? why isn't the simple implementation working as expected?
template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
{
  __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
  __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
  __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
  __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);

  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
  __m256 sum4 = _mm256_add_ps(perm4, hsum8);

  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
  return final;
}
template<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)
{
  Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  return _mm256_blend_pd(tmp0, tmp1, 0xC);
}
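
// Illustration: preduxp reduces several packets at once; lane i of the result
// is the horizontal sum of input packet i, e.g. for Packet4d:
//   out[i] == vecs[i][0] + vecs[i][1] + vecs[i][2] + vecs[i][3]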

template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  Packet8f tmp0 = _mm256_hadd_ps(a,_mm256_permute2f128_ps(a,a,1));
  tmp0 = _mm256_hadd_ps(tmp0,tmp0);
  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  Packet4d tmp0 = _mm256_hadd_pd(a,_mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_hadd_pd(tmp0,tmp0));
}
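
// Illustration: tracing predux<Packet4d> on a = {a0,a1,a2,a3}:
//   hadd(a, swapped halves) -> {a0+a1, a2+a3, a2+a3, a0+a1}
//   hadd(tmp0, tmp0)        -> every lane holds a0+a1+a2+a3
// pfirst then extracts lane 0, the full sum.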

template<> EIGEN_STRONG_INLINE Packet4f predux_downto4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}

template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}

template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}

template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}

template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}


template<int Offset>
struct palign_impl<Offset,Packet8f>
{
  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_ps(first, second, 1);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0x88);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_ps(first, second, 3);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_ps(first, second, 7);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xee);
    }
    else if (Offset==4)
    {
      first = _mm256_blend_ps(first, second, 15);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
    }
    else if (Offset==5)
    {
      first = _mm256_blend_ps(first, second, 31);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0x88);
    }
    else if (Offset==6)
    {
      first = _mm256_blend_ps(first, second, 63);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xcc);
    }
    else if (Offset==7)
    {
      first = _mm256_blend_ps(first, second, 127);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xee);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4d>
{
  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_pd(first, second, 1);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 0xA);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_pd(first, second, 3);
      first = _mm256_permute2f128_pd(first, first, 1);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_pd(first, second, 7);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 5);
    }
  }
};

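// Illustration: palign_impl<Offset,P>::run(first, second) shifts the lane
// concatenation [first | second] left by Offset lanes into `first`. For
// Packet4d with Offset==1, first={a0,a1,a2,a3} and second={b0,b1,b2,b3}
// yield first=={a1,a2,a3,b0}.
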
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}
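
// Illustration: viewing kernel.packet[i] as row i of a 4x4 matrix m, the
// Packet4d variant above performs an in-register transpose:
//   before: packet[i] == {m(i,0), m(i,1), m(i,2), m(i,3)}
//   after:  packet[i] == {m(0,i), m(1,i), m(2,i), m(3,i)}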

template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
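
// Illustration: pblend implements a lane-wise ternary (sketch):
//   Selector<4> s = {{1,0,0,1}};
//   Packet4d r = pblend(s, t, e); // r == {t[0], e[1], e[2], t[3]}
// Zero selector entries pick the "else" packet, nonzero entries the "then".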

template<> EIGEN_STRONG_INLINE Packet8f pinsertfirst(const Packet8f& a, float b)
{
  return _mm256_blend_ps(a,pset1<Packet8f>(b),1);
}

template<> EIGEN_STRONG_INLINE Packet4d pinsertfirst(const Packet4d& a, double b)
{
  return _mm256_blend_pd(a,pset1<Packet4d>(b),1);
}

template<> EIGEN_STRONG_INLINE Packet8f pinsertlast(const Packet8f& a, float b)
{
  return _mm256_blend_ps(a,pset1<Packet8f>(b),(1<<7));
}

template<> EIGEN_STRONG_INLINE Packet4d pinsertlast(const Packet4d& a, double b)
{
  return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX_H