// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Jianwei Cui <thucjw@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H
#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H

// NVCC fails to compile this code
#if !defined(__CUDACC__)

namespace Eigen {

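// MakeComplex promotes a real scalar to std::complex (and passes complex
// values through unchanged) so the FFT kernels below can operate on a single
// complex working buffer regardless of the input scalar type.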
template <bool NeedUpgrade> struct MakeComplex {
  template <typename T>
  EIGEN_DEVICE_FUNC
  T operator() (const T& val) const { return val; }
};

template <> struct MakeComplex<true> {
  template <typename T>
  EIGEN_DEVICE_FUNC
  std::complex<T> operator() (const T& val) const { return std::complex<T>(val, 0); }
};

template <> struct MakeComplex<false> {
  template <typename T>
  EIGEN_DEVICE_FUNC
  std::complex<T> operator() (const std::complex<T>& val) const { return val; }
};

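// PartOf extracts the requested component (real part, imaginary part, or the
// full complex value) from each computed coefficient.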
template <int ResultType> struct PartOf {
  template <typename T> T operator() (const T& val) const { return val; }
};

template <> struct PartOf<RealPart> {
  template <typename T> T operator() (const std::complex<T>& val) const { return val.real(); }
};

template <> struct PartOf<ImagPart> {
  template <typename T> T operator() (const std::complex<T>& val) const { return val.imag(); }
};

namespace internal {
template <typename FFT, typename XprType, int FFTResultType, int FFTDir>
struct traits<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir> > : public traits<XprType> {
  typedef traits<XprType> XprTraits;
  typedef typename NumTraits<typename XprTraits::Scalar>::Real RealScalar;
  typedef typename std::complex<RealScalar> ComplexScalar;
  typedef typename XprTraits::Scalar InputScalar;
  typedef typename conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
};

template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
struct eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, Eigen::Dense> {
  typedef const TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>& type;
};

template <typename FFT, typename XprType, int FFTResultType, int FFTDirection>
struct nested<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection>, 1, typename eval<TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> >::type> {
  typedef TensorFFTOp<FFT, XprType, FFTResultType, FFTDirection> type;
};

} // end namespace internal

template <typename FFT, typename XprType, int FFTResultType, int FFTDir>
class TensorFFTOp : public TensorBase<TensorFFTOp<FFT, XprType, FFTResultType, FFTDir>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorFFTOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename std::complex<RealScalar> ComplexScalar;
  typedef typename internal::conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
  typedef OutputScalar CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorFFTOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorFFTOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorFFTOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(const XprType& expr, const FFT& fft)
      : m_xpr(expr), m_fft(fft) {}

  EIGEN_DEVICE_FUNC
  const FFT& fft() const { return m_fft; }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type& expression() const {
    return m_xpr;
  }

 protected:
  typename XprType::Nested m_xpr;
  const FFT m_fft;
};
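
// A minimal usage sketch (shapes are arbitrary; fft() is the TensorBase method
// this operator is constructed through, and BothParts / FFT_FORWARD are the
// result-type and direction enums the template parameters above refer to):
//
//   Eigen::Tensor<float, 2> input(30, 40);
//   input.setRandom();
//   Eigen::array<ptrdiff_t, 2> fft_dims;
//   fft_dims[0] = 0;  // transform along dimension 0 ...
//   fft_dims[1] = 1;  // ... then along dimension 1
//   Eigen::Tensor<std::complex<float>, 2> output =
//       input.fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft_dims);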

// Eval as rvalue
template <typename FFT, typename ArgType, typename Device, int FFTResultType, int FFTDir>
struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, Device> {
  typedef TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename std::complex<RealScalar> ComplexScalar;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
  typedef internal::traits<XprType> XprTraits;
  typedef typename XprTraits::Scalar InputScalar;
  typedef typename internal::conditional<FFTResultType == RealPart || FFTResultType == ImagPart, RealScalar, ComplexScalar>::type OutputScalar;
  typedef OutputScalar CoeffReturnType;
  typedef typename PacketType<OutputScalar, Device>::type PacketReturnType;

  enum {
    IsAligned = false,
    PacketAccess = true,
    BlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_fft(op.fft()), m_impl(op.expression(), device), m_data(NULL), m_device(device) {
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      eigen_assert(input_dims[i] > 0);
      m_dimensions[i] = input_dims[i];
    }

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1];
      }
    } else {
      m_strides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1];
      }
    }
    m_size = m_dimensions.TotalSize();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_dimensions;
  }

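  // Evaluation is eager: evalSubExprsIfNeeded() computes the complete FFT into
  // a buffer (the caller-provided `data` if any, otherwise one allocated here),
  // and coeff()/packet() afterwards simply read back from that buffer.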
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(OutputScalar* data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    if (data) {
      evalToBuf(data);
      return false;
    } else {
      m_data = (CoeffReturnType*)m_device.allocate(sizeof(CoeffReturnType) * m_size);
      evalToBuf(m_data);
      return true;
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    if (m_data) {
      m_device.deallocate(m_data);
      m_data = NULL;
    }
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const {
    return m_data[index];
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const {
    return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
  }

  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return m_data; }

 private:
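  // evalToBuf computes the full N-D FFT: the input is first promoted into a
  // complex working buffer, then every dimension listed in m_fft is processed
  // by running an independent 1-D transform over each line along it.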
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalToBuf(OutputScalar* data) {
    const bool write_to_out = internal::is_same<OutputScalar, ComplexScalar>::value;
    ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * m_size);

    for (Index i = 0; i < m_size; ++i) {
      buf[i] = MakeComplex<internal::is_same<InputScalar, RealScalar>::value>()(m_impl.coeff(i));
    }

    for (size_t i = 0; i < m_fft.size(); ++i) {
      int dim = m_fft[i];
      eigen_assert(dim >= 0 && dim < NumDims);
      Index line_len = m_dimensions[dim];
      eigen_assert(line_len >= 1);
      ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * line_len);
      const bool is_power_of_two = isPowerOfTwo(line_len);
      const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len);
      const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite);

      ComplexScalar* a = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite);
      ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite);
      ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * (line_len + 1));
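      // For the Bluestein path, precompute the chirp sequence
      // pos_j_base_powered[j] = w^(j*j) with w = exp(i*pi/line_len).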
      if (!is_power_of_two) {
        ComplexScalar pos_j_base = ComplexScalar(std::cos(M_PI / line_len), std::sin(M_PI / line_len));
        for (Index j = 0; j < line_len + 1; ++j) {
          pos_j_base_powered[j] = std::pow(pos_j_base, j * j);
        }
      }

      for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) {
        Index base_offset = getBaseOffsetFromIndex(partial_index, dim);

        // get data into line_buf
        for (Index j = 0; j < line_len; ++j) {
          Index offset = getIndexFromOffset(base_offset, dim, j);
          line_buf[j] = buf[offset];
        }

        // process the line
        if (is_power_of_two) {
          processDataLineCooleyTukey(line_buf, line_len, log_len);
        }
        else {
          processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered);
        }

        // write back, scaling inverse transforms by 1/line_len
        for (Index j = 0; j < line_len; ++j) {
          const ComplexScalar div_factor = (FFTDir == FFT_FORWARD) ? ComplexScalar(1, 0) : ComplexScalar(line_len, 0);
          Index offset = getIndexFromOffset(base_offset, dim, j);
          buf[offset] = line_buf[j] / div_factor;
        }
      }
      m_device.deallocate(line_buf);
      // Release the Bluestein scratch buffers.
      if (!is_power_of_two) {
        m_device.deallocate(a);
        m_device.deallocate(b);
        m_device.deallocate(pos_j_base_powered);
      }
    }

    if (!write_to_out) {
      for (Index i = 0; i < m_size; ++i) {
        data[i] = PartOf<FFTResultType>()(buf[i]);
      }
      m_device.deallocate(buf);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static bool isPowerOfTwo(Index x) {
    eigen_assert(x > 0);
    return !(x & (x - 1));
  }

  // Returns the padded length for Bluestein's FFT algorithm: the smallest
  // power of two that is >= 2 * n - 1 (e.g. n = 5 needs >= 9, giving 16).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index findGoodComposite(Index n) {
    Index i = 2;
    while (i < 2 * n - 1) i *= 2;
    return i;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index getLog2(Index m) {
    Index log2m = 0;
    while (m >>= 1) log2m++;
    return log2m;
  }

  // Runs the Cooley-Tukey algorithm directly; the data length must be a power of two.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) {
    eigen_assert(isPowerOfTwo(line_len));
    scramble_FFT(line_buf, line_len);
    compute_1D_Butterfly<FFTDir>(line_buf, line_len, log_len);
  }

  // Runs Bluestein's FFT algorithm; m is the padded length, a power of two >= (2 * n - 1).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b, const ComplexScalar* pos_j_base_powered) {
    Index n = line_len;
    Index m = good_composite;
    ComplexScalar* data = line_buf;

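    // Bluestein's identity: with w = exp(i*pi/n),
    //   X[k] = conj(w^(k*k)) * sum_j (x[j] * conj(w^(j*j))) * w^((k-j)*(k-j)),
    // so the length-n DFT becomes a length-m circular convolution of the
    // modulated input `a` with the chirp kernel `b`, which the power-of-two
    // Cooley-Tukey kernels can evaluate. The inverse direction swaps the
    // conjugations.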
    // Modulate the input by the (conjugate) chirp and zero-pad it to length m.
    for (Index i = 0; i < n; ++i) {
      if (FFTDir == FFT_FORWARD) {
        a[i] = data[i] * std::conj(pos_j_base_powered[i]);
      }
      else {
        a[i] = data[i] * pos_j_base_powered[i];
      }
    }
    for (Index i = n; i < m; ++i) {
      a[i] = ComplexScalar(0, 0);
    }

    // Build the chirp kernel with circular wrap-around at the tail.
    for (Index i = 0; i < n; ++i) {
      if (FFTDir == FFT_FORWARD) {
        b[i] = pos_j_base_powered[i];
      }
      else {
        b[i] = std::conj(pos_j_base_powered[i]);
      }
    }
    for (Index i = n; i < m - n; ++i) {
      b[i] = ComplexScalar(0, 0);
    }
    for (Index i = m - n; i < m; ++i) {
      if (FFTDir == FFT_FORWARD) {
        b[i] = pos_j_base_powered[m - i];
      }
      else {
        b[i] = std::conj(pos_j_base_powered[m - i]);
      }
    }

    // Circular convolution via FFT: a = IFFT(FFT(a) .* FFT(b)).
    scramble_FFT(a, m);
    compute_1D_Butterfly<FFT_FORWARD>(a, m, log_len);

    scramble_FFT(b, m);
    compute_1D_Butterfly<FFT_FORWARD>(b, m, log_len);

    for (Index i = 0; i < m; ++i) {
      a[i] *= b[i];
    }

    scramble_FFT(a, m);
    compute_1D_Butterfly<FFT_REVERSE>(a, m, log_len);

    // Do the scaling after the inverse FFT.
    for (Index i = 0; i < m; ++i) {
      a[i] /= m;
    }

    // Demodulate by the chirp to obtain the DFT of the original line.
    for (Index i = 0; i < n; ++i) {
      if (FFTDir == FFT_FORWARD) {
        data[i] = a[i] * std::conj(pos_j_base_powered[i]);
      }
      else {
        data[i] = a[i] * pos_j_base_powered[i];
      }
    }
  }

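  // In-place bit-reversal permutation: reorders `data` into the order expected
  // by the iterative radix-2 butterfly passes below.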
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void scramble_FFT(ComplexScalar* data, Index n) {
    eigen_assert(isPowerOfTwo(n));
    Index j = 1;
    for (Index i = 1; i < n; ++i) {
      if (j > i) {
        std::swap(data[j - 1], data[i - 1]);
      }
      Index m = n >> 1;
      while (m >= 2 && j > m) {
        j -= m;
        m >>= 1;
      }
      j += m;
    }
  }

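  // Radix-2 decimation-in-time butterflies on bit-reversed input: n <= 8 is
  // handled by fully unrolled kernels, larger n recurses into two half-size
  // transforms that are merged with a twiddle-factor loop.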
  template <int Dir>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_1D_Butterfly(ComplexScalar* data, Index n, Index n_power_of_2) {
    eigen_assert(isPowerOfTwo(n));
    if (n == 1) {
      return;
    }
    else if (n == 2) {
      ComplexScalar tmp = data[1];
      data[1] = data[0] - data[1];
      data[0] += tmp;
      return;
    }
    else if (n == 4) {
      ComplexScalar tmp[4];
      tmp[0] = data[0] + data[1];
      tmp[1] = data[0] - data[1];
      tmp[2] = data[2] + data[3];
      if (Dir == FFT_FORWARD) {
        tmp[3] = ComplexScalar(0.0, -1.0) * (data[2] - data[3]);
      }
      else {
        tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]);
      }
      data[0] = tmp[0] + tmp[2];
      data[1] = tmp[1] + tmp[3];
      data[2] = tmp[0] - tmp[2];
      data[3] = tmp[1] - tmp[3];
      return;
    }
    else if (n == 8) {
      ComplexScalar tmp_1[8];
      ComplexScalar tmp_2[8];

      tmp_1[0] = data[0] + data[1];
      tmp_1[1] = data[0] - data[1];
      tmp_1[2] = data[2] + data[3];
      if (Dir == FFT_FORWARD) {
        tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1);
      }
      else {
        tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1);
      }
      tmp_1[4] = data[4] + data[5];
      tmp_1[5] = data[4] - data[5];
      tmp_1[6] = data[6] + data[7];
      if (Dir == FFT_FORWARD) {
        tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1);
      }
      else {
        tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1);
      }
      tmp_2[0] = tmp_1[0] + tmp_1[2];
      tmp_2[1] = tmp_1[1] + tmp_1[3];
      tmp_2[2] = tmp_1[0] - tmp_1[2];
      tmp_2[3] = tmp_1[1] - tmp_1[3];
      tmp_2[4] = tmp_1[4] + tmp_1[6];
      // SQRT2DIV2 = sqrt(2)/2
      #define SQRT2DIV2 0.7071067811865476
      if (Dir == FFT_FORWARD) {
        tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2);
        tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1);
        tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2);
      }
      else {
        tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2);
        tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1);
        tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2);
      }
      data[0] = tmp_2[0] + tmp_2[4];
      data[1] = tmp_2[1] + tmp_2[5];
      data[2] = tmp_2[2] + tmp_2[6];
      data[3] = tmp_2[3] + tmp_2[7];
      data[4] = tmp_2[0] - tmp_2[4];
      data[5] = tmp_2[1] - tmp_2[5];
      data[6] = tmp_2[2] - tmp_2[6];
      data[7] = tmp_2[3] - tmp_2[7];

      return;
    }
    else {
      compute_1D_Butterfly<Dir>(data, n / 2, n_power_of_2 - 1);
      compute_1D_Butterfly<Dir>(data + n / 2, n / 2, n_power_of_2 - 1);
      // The LUTs replace the runtime evaluation of:
      //   RealScalar wtemp = -2 * std::sin(M_PI / n) * std::sin(M_PI / n);
      //   RealScalar wpi = -std::sin(2 * M_PI / n);
      RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2];
      RealScalar wpi;
      if (Dir == FFT_FORWARD) {
        wpi = m_minus_sin_2_PI_div_n_LUT[n_power_of_2];
      }
      else {
        wpi = 0 - m_minus_sin_2_PI_div_n_LUT[n_power_of_2];
      }

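      // For the forward direction, wtemp = -2 * sin(pi/n)^2 and
      // wpi = -sin(2*pi/n), so 1 + wp = cos(2*pi/n) - i*sin(2*pi/n)
      // = exp(-2*pi*i/n): the update `w += w * wp` below advances w by one
      // twiddle factor per iteration without evaluating sin/cos inside the
      // loop (the reverse direction flips the sign of the angle).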
      const ComplexScalar wp(wtemp, wpi);
      ComplexScalar w(1.0, 0.0);
      for (Index i = 0; i < n / 2; i++) {
        ComplexScalar temp(data[i + n / 2] * w);
        data[i + n / 2] = data[i] - temp;
        data[i] += temp;
        w += w * wp;
      }
      return;
    }
  }

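  // Maps a line index (enumerating all 1-D lines along omitted_dim) to the
  // flat offset of that line's first element, i.e. the offset with the
  // omitted_dim coordinate set to 0.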
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getBaseOffsetFromIndex(Index index, Index omitted_dim) const {
    Index result = 0;

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > omitted_dim; --i) {
        const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
        const Index idx = index / partial_m_stride;
        index -= idx * partial_m_stride;
        result += idx * m_strides[i];
      }
      result += index;
    }
    else {
      for (Index i = 0; i < omitted_dim; ++i) {
        const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim];
        const Index idx = index / partial_m_stride;
        index -= idx * partial_m_stride;
        result += idx * m_strides[i];
      }
      result += index;
    }
    // The coordinate along omitted_dim is left at 0 here; getIndexFromOffset
    // adds the offset along that dimension.
    return result;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getIndexFromOffset(Index base, Index omitted_dim, Index offset) const {
    Index result = base + offset * m_strides[omitted_dim];
    return result;
  }

 protected:
  Index m_size;
  const FFT& m_fft;
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  TensorEvaluator<ArgType, Device> m_impl;
  CoeffReturnType* m_data;
  const Device& m_device;

  // The LUTs below are indexed by log2 of the transform length, so they
  // support a maximum FFT size of 2^31 for each dimension.
  // m_sin_PI_div_n_LUT[i] = -2 * std::pow(std::sin(M_PI / std::pow(2, i)), 2)
  RealScalar m_sin_PI_div_n_LUT[32] = {
    0.0,
    -2,
    -0.999999999999999,
    -0.292893218813453,
    -0.0761204674887130,
    -0.0192147195967696,
    -0.00481527332780311,
    -0.00120454379482761,
    -3.01181303795779e-04,
    -7.52981608554592e-05,
    -1.88247173988574e-05,
    -4.70619042382852e-06,
    -1.17654829809007e-06,
    -2.94137117780840e-07,
    -7.35342821488550e-08,
    -1.83835707061916e-08,
    -4.59589268710903e-09,
    -1.14897317243732e-09,
    -2.87243293150586e-10,
    -7.18108232902250e-11,
    -1.79527058227174e-11,
    -4.48817645568941e-12,
    -1.12204411392298e-12,
    -2.80511028480785e-13,
    -7.01277571201985e-14,
    -1.75319392800498e-14,
    -4.38298482001247e-15,
    -1.09574620500312e-15,
    -2.73936551250781e-16,
    -6.84841378126949e-17,
    -1.71210344531737e-17,
    -4.28025861329343e-18
  };

  // m_minus_sin_2_PI_div_n_LUT[i] = -std::sin(2 * M_PI / std::pow(2, i))
  RealScalar m_minus_sin_2_PI_div_n_LUT[32] = {
    0.0,
    0.0,
    -1.00000000000000e+00,
    -7.07106781186547e-01,
    -3.82683432365090e-01,
    -1.95090322016128e-01,
    -9.80171403295606e-02,
    -4.90676743274180e-02,
    -2.45412285229123e-02,
    -1.22715382857199e-02,
    -6.13588464915448e-03,
    -3.06795676296598e-03,
    -1.53398018628477e-03,
    -7.66990318742704e-04,
    -3.83495187571396e-04,
    -1.91747597310703e-04,
    -9.58737990959773e-05,
    -4.79368996030669e-05,
    -2.39684498084182e-05,
    -1.19842249050697e-05,
    -5.99211245264243e-06,
    -2.99605622633466e-06,
    -1.49802811316901e-06,
    -7.49014056584716e-07,
    -3.74507028292384e-07,
    -1.87253514146195e-07,
    -9.36267570730981e-08,
    -4.68133785365491e-08,
    -2.34066892682746e-08,
    -1.17033446341373e-08,
    -5.85167231706864e-09,
    -2.92583615853432e-09
  };
};

} // end namespace Eigen

#endif // !defined(__CUDACC__)

#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H