10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H 11 #define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H 24 template<DenseIndex DimId,
typename XprType>
25 struct traits<TensorChippingOp<DimId, XprType> > :
public traits<XprType>
27 typedef typename XprType::Scalar Scalar;
28 typedef traits<XprType> XprTraits;
29 typedef typename packet_traits<Scalar>::type Packet;
30 typedef typename XprTraits::StorageKind StorageKind;
31 typedef typename XprTraits::Index Index;
32 typedef typename XprType::Nested Nested;
33 typedef typename remove_reference<Nested>::type _Nested;
34 static const int NumDimensions = XprTraits::NumDimensions - 1;
35 static const int Layout = XprTraits::Layout;
38 template<DenseIndex DimId,
typename XprType>
39 struct eval<TensorChippingOp<DimId, XprType>,
Eigen::Dense>
41 typedef const TensorChippingOp<DimId, XprType>& type;
44 template<DenseIndex DimId,
typename XprType>
45 struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
47 typedef TensorChippingOp<DimId, XprType> type;
50 template <DenseIndex DimId>
53 DimensionId(DenseIndex dim) {
54 eigen_assert(dim == DimId);
56 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim()
const {
61 struct DimensionId<Dynamic>
63 DimensionId(DenseIndex dim) : actual_dim(dim) {
64 eigen_assert(dim >= 0);
66 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim()
const {
70 const DenseIndex actual_dim;
78 template<DenseIndex DimId,
typename XprType>
79 class TensorChippingOp :
public TensorBase<TensorChippingOp<DimId, XprType> >
82 typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar;
83 typedef typename Eigen::internal::traits<TensorChippingOp>::Packet Packet;
84 typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
85 typedef typename XprType::CoeffReturnType CoeffReturnType;
86 typedef typename XprType::PacketReturnType PacketReturnType;
87 typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested;
88 typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
89 typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;
91 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(
const XprType& expr,
const Index offset,
const Index dim)
92 : m_xpr(expr), m_offset(offset), m_dim(dim) {
96 const Index offset()
const {
return m_offset; }
98 const Index dim()
const {
return m_dim.actualDim(); }
101 const typename internal::remove_all<typename XprType::Nested>::type&
102 expression()
const {
return m_xpr; }
105 EIGEN_STRONG_INLINE TensorChippingOp& operator = (
const TensorChippingOp& other)
107 typedef TensorAssignOp<TensorChippingOp, const TensorChippingOp> Assign;
108 Assign assign(*
this, other);
109 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
113 template<
typename OtherDerived>
115 EIGEN_STRONG_INLINE TensorChippingOp& operator = (
const OtherDerived& other)
117 typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign;
118 Assign assign(*
this, other);
119 internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
124 typename XprType::Nested m_xpr;
125 const Index m_offset;
126 const internal::DimensionId<DimId> m_dim;
131 template<DenseIndex DimId,
typename ArgType,
typename Device>
132 struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
134 typedef TensorChippingOp<DimId, ArgType> XprType;
135 static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
136 static const int NumDims = NumInputDims-1;
137 typedef typename XprType::Index Index;
138 typedef DSizes<Index, NumDims> Dimensions;
139 typedef typename XprType::Scalar Scalar;
145 PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
146 Layout = TensorEvaluator<ArgType, Device>::Layout,
150 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(
const XprType& op,
const Device& device)
151 : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
154 EIGEN_STATIC_ASSERT(NumInputDims >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
155 eigen_assert(NumInputDims > m_dim.actualDim());
157 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
158 eigen_assert(op.offset() < input_dims[m_dim.actualDim()]);
161 for (
int i = 0; i < NumInputDims; ++i) {
162 if (i != m_dim.actualDim()) {
163 m_dimensions[j] = input_dims[i];
170 if (static_cast<int>(Layout) ==
static_cast<int>(ColMajor)) {
171 for (
int i = 0; i < m_dim.actualDim(); ++i) {
172 m_stride *= input_dims[i];
173 m_inputStride *= input_dims[i];
176 for (
int i = NumInputDims-1; i > m_dim.actualDim(); --i) {
177 m_stride *= input_dims[i];
178 m_inputStride *= input_dims[i];
181 m_inputStride *= input_dims[m_dim.actualDim()];
182 m_inputOffset = m_stride * op.offset();
185 typedef typename XprType::CoeffReturnType CoeffReturnType;
186 typedef typename XprType::PacketReturnType PacketReturnType;
188 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
const Dimensions& dimensions()
const {
return m_dimensions; }
190 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
bool evalSubExprsIfNeeded(Scalar* ) {
191 m_impl.evalSubExprsIfNeeded(NULL);
195 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void cleanup() {
199 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index)
const 201 return m_impl.coeff(srcCoeff(index));
204 template<
int LoadMode>
205 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index)
const 207 const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
208 EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
209 eigen_assert(index+packetSize-1 < dimensions().TotalSize());
211 if ((static_cast<
int>(Layout) == static_cast<
int>(ColMajor) && m_dim.actualDim() == 0) ||
212 (static_cast<
int>(Layout) == static_cast<
int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
214 eigen_assert(m_stride == 1);
215 Index inputIndex = index * m_inputStride + m_inputOffset;
216 EIGEN_ALIGN_MAX
typename internal::remove_const<CoeffReturnType>::type values[packetSize];
217 for (
int i = 0; i < packetSize; ++i) {
218 values[i] = m_impl.coeff(inputIndex);
219 inputIndex += m_inputStride;
221 PacketReturnType rslt = internal::pload<PacketReturnType>(values);
223 }
else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) ||
224 (static_cast<int>(Layout) ==
static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
226 eigen_assert(m_stride > index);
227 return m_impl.template packet<LoadMode>(index + m_inputOffset);
229 const Index idx = index / m_stride;
230 const Index rem = index - idx * m_stride;
231 if (rem + packetSize <= m_stride) {
232 Index inputIndex = idx * m_inputStride + m_inputOffset + rem;
233 return m_impl.template packet<LoadMode>(inputIndex);
236 EIGEN_ALIGN_MAX
typename internal::remove_const<CoeffReturnType>::type values[packetSize];
237 for (
int i = 0; i < packetSize; ++i) {
238 values[i] = coeff(index);
241 PacketReturnType rslt = internal::pload<PacketReturnType>(values);
247 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data()
const {
248 Scalar* result = m_impl.data();
249 if (((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumDims) ||
250 (static_cast<int>(Layout) ==
static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) &&
252 return result + m_inputOffset;
259 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index)
const 262 if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
263 (static_cast<int>(Layout) ==
static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
265 eigen_assert(m_stride == 1);
266 inputIndex = index * m_inputStride + m_inputOffset;
267 }
else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) ||
268 (static_cast<int>(Layout) ==
static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
270 eigen_assert(m_stride > index);
271 inputIndex = index + m_inputOffset;
273 const Index idx = index / m_stride;
274 inputIndex = idx * m_inputStride + m_inputOffset;
275 index -= idx * m_stride;
281 Dimensions m_dimensions;
285 TensorEvaluator<ArgType, Device> m_impl;
286 const internal::DimensionId<DimId> m_dim;
287 const Device& m_device;
292 template<DenseIndex DimId,
typename ArgType,
typename Device>
293 struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
294 :
public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
296 typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base;
297 typedef TensorChippingOp<DimId, ArgType> XprType;
298 static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
299 static const int NumDims = NumInputDims-1;
300 typedef typename XprType::Index Index;
301 typedef DSizes<Index, NumDims> Dimensions;
302 typedef typename XprType::Scalar Scalar;
306 PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
309 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(
const XprType& op,
const Device& device)
313 typedef typename XprType::CoeffReturnType CoeffReturnType;
314 typedef typename XprType::PacketReturnType PacketReturnType;
316 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
318 return this->m_impl.coeffRef(this->srcCoeff(index));
321 template <
int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
322 void writePacket(Index index,
const PacketReturnType& x)
324 static const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
325 EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
327 if ((static_cast<
int>(this->Layout) == static_cast<
int>(ColMajor) && this->m_dim.actualDim() == 0) ||
328 (static_cast<
int>(this->Layout) == static_cast<
int>(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) {
330 eigen_assert(this->m_stride == 1);
331 EIGEN_ALIGN_MAX
typename internal::remove_const<CoeffReturnType>::type values[packetSize];
332 internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
333 Index inputIndex = index * this->m_inputStride + this->m_inputOffset;
334 for (
int i = 0; i < packetSize; ++i) {
335 this->m_impl.coeffRef(inputIndex) = values[i];
336 inputIndex += this->m_inputStride;
338 }
else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
339 (static_cast<int>(this->Layout) ==
static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
341 eigen_assert(this->m_stride > index);
342 this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
344 const Index idx = index / this->m_stride;
345 const Index rem = index - idx * this->m_stride;
346 if (rem + packetSize <= this->m_stride) {
347 const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem;
348 this->m_impl.template writePacket<StoreMode>(inputIndex, x);
351 EIGEN_ALIGN_MAX
typename internal::remove_const<CoeffReturnType>::type values[packetSize];
352 internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
353 for (
int i = 0; i < packetSize; ++i) {
354 this->coeffRef(index) = values[i];
#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H