// NOTE(review): this file is a garbled doc-extraction of Eigen's
// SparseLU_kernel_bmod.h. The stray integers fused into the lines
// (11, 12, 31, 33, ...) are the original file's line numbers, and the
// struct/function braces are missing from this view — do not edit the
// tokens without recovering the pristine source.
//
// Primary template: the "block modification" (bmod) kernel of SparseLU.
// For a U-segment of size `segsize`, it performs the dense triangular
// solve against the current supernode and the trailing matrix-vector
// update, scattering results back into the sparse accumulator `dense`.
// SegSizeAtCompileTime lets small fixed segment sizes be specialized.
11 #ifndef SPARSELU_KERNEL_BMOD_H 12 #define SPARSELU_KERNEL_BMOD_H 31 template <
int SegSizeAtCompileTime>
struct LU_kernel_bmod
33 template <
typename BlockScalarVector,
typename ScalarVector,
typename IndexVector>
// run():
//   segsize  - number of rows in the U-segment to solve
//   dense    - sparse accumulator (SPA), read and written by row index
//   tempv    - scratch vector (gather target / solve workspace)
//   lusup    - numerical values of the L supernodes; luptr is advanced
//              in-place to address the effective triangular block
//   lda      - leading dimension (outer stride) of the supernode storage
//   nrow     - number of rows below the segment to update
//   lsub     - compressed row indices; lptr/no_zeros locate the segment
34 static EIGEN_DONT_INLINE
void run(
const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr,
const Index lda,
35 const Index nrow, IndexVector& lsub,
const Index lptr,
const Index no_zeros);
// Out-of-line definition of the generic bmod kernel.
// NOTE(review): several statements are missing from this extraction —
// the braces, the declarations of `i`/`irow`, the `irow = lsub(isub)`
// gather/scatter index reads, and (presumably) an `l.setZero()` before
// the gemm call. Verify against the pristine Eigen source before use.
38 template <
int SegSizeAtCompileTime>
39 template <
typename BlockScalarVector,
typename ScalarVector,
typename IndexVector>
40 EIGEN_DONT_INLINE
void LU_kernel_bmod<SegSizeAtCompileTime>::run(
const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr,
const Index lda,
41 const Index nrow, IndexVector& lsub,
const Index lptr,
const Index no_zeros)
43 typedef typename ScalarVector::Scalar Scalar;
// Start of this segment's row indices inside lsub.
47 Index isub = lptr + no_zeros;
// Gather the U[*,j] segment from the sparse accumulator into tempv.
// The compile-time/dynamic ternary lets the compiler fully unroll the
// loop when SegSizeAtCompileTime is a fixed constant.
50 for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
// irow is read from lsub(isub) in the original — assignment lost in
// this extraction.
53 tempv(i) = dense(irow);
// Skip no_zeros columns and rows to reach the effective triangular
// block of the supernode (column-major, stride lda => lda*no_zeros
// columns plus no_zeros rows).
57 luptr += lda * no_zeros + no_zeros;
// Map the segsize x segsize unit-lower-triangular block A and the
// gathered right-hand side u (no copies; strided views into lusup/tempv).
59 Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
60 Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);
// In-place dense triangular solve: u <- A^{-1} u (unit diagonal).
62 u = A.template triangularView<UnitLower>().solve(u);
// Prepare the nrow x segsize rectangular block B below the triangle and
// an output vector l placed in tempv's tail, padded so that l and B
// share the same SIMD alignment phase (enables aligned packet loads in
// the gemm kernel).
66 const Index PacketSize = internal::packet_traits<Scalar>::size;
67 Index ldl = internal::first_multiple(nrow, PacketSize);
// NOTE(review): the original advances luptr by segsize before mapping B
// (to address the rows below the triangle); that statement is not
// visible in this extract — confirm against upstream.
68 Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
69 Index aligned_offset = internal::first_default_aligned(tempv.data()+segsize, PacketSize);
70 Index aligned_with_B_offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize))%PacketSize;
71 Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
// Dense matrix-vector product accumulated into l via SparseLU's
// specialized gemm (l is zeroed first in the original source).
74 internal::sparselu_gemm<Scalar>(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride());
// Scatter the solved segment tempv back into the sparse accumulator.
77 isub = lptr + no_zeros;
78 for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
// irow = lsub(isub++) in the original — lost in this extraction.
81 dense(irow) = tempv(i);
// Scatter the update l into dense (the original subtracts l(i) from
// dense(irow) here; the loop body is missing from this view).
85 for (i = 0; i < nrow; i++)
// Explicit specialization for a segment of size 1: the triangular solve
// degenerates to a single scalar, so the kernel reduces to an axpy-style
// update and needs no tempv scratch (hence the unnamed segsize/tempv
// parameters below). Interface mirrors the primary template exactly.
92 template <>
struct LU_kernel_bmod<1>
94 template <
typename BlockScalarVector,
typename ScalarVector,
typename IndexVector>
95 static EIGEN_DONT_INLINE
void run(
const Index , BlockScalarVector& dense, ScalarVector& , ScalarVector& lusup, Index& luptr,
96 const Index lda,
const Index nrow, IndexVector& lsub,
const Index lptr,
const Index no_zeros);
// Size-1 specialization: dense(rows below pivot) -= f * L-column, with
// the multiplier loop manually unrolled by two to expose independent
// loads/stores to the CPU.
// NOTE(review): braces, the declaration of `i`, and the header of the
// trailing remainder loop/branch are missing from this extraction.
100 template <
typename BlockScalarVector,
typename ScalarVector,
typename IndexVector>
101 EIGEN_DONT_INLINE
void LU_kernel_bmod<1>::run(
const Index , BlockScalarVector& dense, ScalarVector& , ScalarVector& lusup, Index& luptr,
102 const Index lda,
const Index nrow, IndexVector& lsub,
const Index lptr,
const Index no_zeros)
104 typedef typename ScalarVector::Scalar Scalar;
105 typedef typename IndexVector::Scalar StorageIndex;
// f: the (already solved) scalar value of the size-1 segment, read from
// the accumulator at the segment's row index.
106 Scalar f = dense(lsub(lptr + no_zeros));
// Advance to the diagonal of the effective block, then +1 to skip the
// unit diagonal and point at the sub-diagonal L entries.
107 luptr += lda * no_zeros + no_zeros + 1;
// a: L-column values below the diagonal; irow: their row indices.
108 const Scalar* a(lusup.data() + luptr);
109 const StorageIndex* irow(lsub.data()+lptr + no_zeros + 1);
// Main loop, unrolled by 2 (i starts at 0 in the original; its
// declaration is missing from this view).
111 for (; i+1 < nrow; i+=2)
113 Index i0 = *(irow++);
114 Index i1 = *(irow++);
// Load both accumulator entries before storing — the original computes
// a0/a1 = *(a++) and d0 -= f*a0, d1 -= f*a1 between these lines
// (statements lost in this extraction).
117 Scalar d0 = dense.coeff(i0);
118 Scalar d1 = dense.coeff(i1);
121 dense.coeffRef(i0) = d0;
122 dense.coeffRef(i1) = d1;
// Remainder: when nrow is odd, one last row is updated (the guarding
// `if(i < nrow)` header is missing from this view).
125 dense.coeffRef(*(irow++)) -= f * *(a++);
131 #endif // SPARSELU_KERNEL_BMOD_H
// (doc-extraction artifact, not source code: "Definition: Eigen_Colamd.h:54")