// SparseSelfAdjointView.h
//
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

namespace internal {

31template<typename MatrixType, unsigned int Mode>
32struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
33};
34
35template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
36void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
37
38template<int Mode,typename MatrixType,int DestOrder>
39void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

} // end namespace internal

43template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
44 : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
45{
46 public:
47
48 enum {
49 Mode = _Mode,
50 RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
51 ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
52 };
53
55 typedef typename MatrixType::Scalar Scalar;
56 typedef typename MatrixType::StorageIndex StorageIndex;
58 typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
59 typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
60
61 explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
62 {
63 eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
64 }
65
66 inline Index rows() const { return m_matrix.rows(); }
67 inline Index cols() const { return m_matrix.cols(); }
68
70 const _MatrixTypeNested& matrix() const { return m_matrix; }
71 typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
72
78 template<typename OtherDerived>
81 {
83 }
84
90 template<typename OtherDerived> friend
93 {
95 }
96
98 template<typename OtherDerived>
101 {
103 }
104
106 template<typename OtherDerived> friend
109 {
111 }
112
121 template<typename DerivedU>
122 SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
123
125 // TODO implement twists in a more evaluator friendly fashion
126 SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
127 {
128 return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
129 }
130
131 template<typename SrcMatrixType,int SrcMode>
132 SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
133 {
134 internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
135 return *this;
136 }
137
138 SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
139 {
140 PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
141 return *this = src.twistedBy(pnull);
142 }
143
144 template<typename SrcMatrixType,unsigned int SrcMode>
145 SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
146 {
147 PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
148 return *this = src.twistedBy(pnull);
149 }
150
151 void resize(Index rows, Index cols)
152 {
153 EIGEN_ONLY_USED_FOR_DEBUG(rows);
154 EIGEN_ONLY_USED_FOR_DEBUG(cols);
155 eigen_assert(rows == this->rows() && cols == this->cols()
156 && "SparseSelfadjointView::resize() does not actually allow to resize.");
157 }
158
159 protected:
160
161 MatrixTypeNested m_matrix;
162 //mutable VectorI m_countPerRow;
163 //mutable VectorI m_countPerCol;
164 private:
165 template<typename Dest> void evalTo(Dest &) const;
166};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

172template<typename Derived>
173template<unsigned int UpLo>
174typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
175{
176 return SparseSelfAdjointView<const Derived, UpLo>(derived());
177}
178
179template<typename Derived>
180template<unsigned int UpLo>
181typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
182{
183 return SparseSelfAdjointView<Derived, UpLo>(derived());
184}

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

190template<typename MatrixType, unsigned int Mode>
191template<typename DerivedU>
192SparseSelfAdjointView<MatrixType,Mode>&
193SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
194{
195 SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
196 if(alpha==Scalar(0))
197 m_matrix = tmp.template triangularView<Mode>();
198 else
199 m_matrix += alpha * tmp.template triangularView<Mode>();
200
201 return *this;
202}

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
209template<typename MatrixType, unsigned int Mode>
210struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
211{
212 typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
213 typedef SparseSelfAdjointShape Shape;
214};
215
216struct SparseSelfAdjoint2Sparse {};
217
218template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
219template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
220
221template< typename DstXprType, typename SrcXprType, typename Functor>
222struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
223{
224 typedef typename DstXprType::StorageIndex StorageIndex;
225 template<typename DestScalar,int StorageOrder>
226 static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
227 {
228 internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
229 }
230
231 template<typename DestScalar>
232 static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
233 {
234 // TODO directly evaluate into dst;
235 SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
236 internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
237 dst = tmp;
238 }
239};

} // end namespace internal

/***************************************************************************
* Implementation of sparse self-adjoint time dense matrix
***************************************************************************/

namespace internal {

249template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
250inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
251{
252 EIGEN_ONLY_USED_FOR_DEBUG(alpha);
253
254 typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
255 typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
256 typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
257 typedef typename LhsEval::InnerIterator LhsIterator;
258 typedef typename SparseLhsType::Scalar LhsScalar;
259
260 enum {
261 LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
262 ProcessFirstHalf =
263 ((Mode&(Upper|Lower))==(Upper|Lower))
264 || ( (Mode&Upper) && !LhsIsRowMajor)
265 || ( (Mode&Lower) && LhsIsRowMajor),
266 ProcessSecondHalf = !ProcessFirstHalf
267 };
268
269 SparseLhsTypeNested lhs_nested(lhs);
270 LhsEval lhsEval(lhs_nested);
271
272 // work on one column at once
273 for (Index k=0; k<rhs.cols(); ++k)
274 {
275 for (Index j=0; j<lhs.outerSize(); ++j)
276 {
277 LhsIterator i(lhsEval,j);
278 // handle diagonal coeff
279 if (ProcessSecondHalf)
280 {
281 while (i && i.index()<j) ++i;
282 if(i && i.index()==j)
283 {
284 res(j,k) += alpha * i.value() * rhs(j,k);
285 ++i;
286 }
287 }
288
289 // premultiplied rhs for scatters
290 typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
291 // accumulator for partial scalar product
292 typename DenseResType::Scalar res_j(0);
293 for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
294 {
295 LhsScalar lhs_ij = i.value();
296 if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
297 res_j += lhs_ij * rhs(i.index(),k);
298 res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
299 }
300 res(j,k) += alpha * res_j;
301
302 // handle diagonal coeff
303 if (ProcessFirstHalf && i && (i.index()==j))
304 res(j,k) += alpha * i.value() * rhs(j,k);
305 }
306 }
307}
308
309
310template<typename LhsView, typename Rhs, int ProductType>
311struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
312: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
313{
314 template<typename Dest>
315 static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
316 {
317 typedef typename LhsView::_MatrixTypeNested Lhs;
318 typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
319 typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
320 LhsNested lhsNested(lhsView.matrix());
321 RhsNested rhsNested(rhs);
322
323 internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
324 }
325};
326
327template<typename Lhs, typename RhsView, int ProductType>
328struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
329: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
330{
331 template<typename Dest>
332 static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
333 {
334 typedef typename RhsView::_MatrixTypeNested Rhs;
335 typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
336 typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
337 LhsNested lhsNested(lhs);
338 RhsNested rhsNested(rhsView.matrix());
339
340 // transpose everything
341 Transpose<Dest> dstT(dst);
342 internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
343 }
344};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

349template<typename LhsView, typename Rhs, int ProductTag>
350struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
351 : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
352{
353 typedef Product<LhsView, Rhs, DefaultProduct> XprType;
354 typedef typename XprType::PlainObject PlainObject;
355 typedef evaluator<PlainObject> Base;
356
357 product_evaluator(const XprType& xpr)
358 : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
359 {
360 ::new (static_cast<Base*>(this)) Base(m_result);
361 generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
362 }
363
364protected:
365 typename Rhs::PlainObject m_lhs;
366 PlainObject m_result;
367};
368
369template<typename Lhs, typename RhsView, int ProductTag>
370struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
371 : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
372{
373 typedef Product<Lhs, RhsView, DefaultProduct> XprType;
374 typedef typename XprType::PlainObject PlainObject;
375 typedef evaluator<PlainObject> Base;
376
377 product_evaluator(const XprType& xpr)
378 : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
379 {
380 ::new (static_cast<Base*>(this)) Base(m_result);
381 generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
382 }
383
384protected:
385 typename Lhs::PlainObject m_rhs;
386 PlainObject m_result;
387};

} // end namespace internal

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

396template<int Mode,typename MatrixType,int DestOrder>
397void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
398{
399 typedef typename MatrixType::StorageIndex StorageIndex;
400 typedef typename MatrixType::Scalar Scalar;
401 typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
402 typedef Matrix<StorageIndex,Dynamic,1> VectorI;
403 typedef evaluator<MatrixType> MatEval;
404 typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
405
406 MatEval matEval(mat);
407 Dest& dest(_dest.derived());
408 enum {
409 StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
410 };
411
412 Index size = mat.rows();
413 VectorI count;
414 count.resize(size);
415 count.setZero();
416 dest.resize(size,size);
417 for(Index j = 0; j<size; ++j)
418 {
419 Index jp = perm ? perm[j] : j;
420 for(MatIterator it(matEval,j); it; ++it)
421 {
422 Index i = it.index();
423 Index r = it.row();
424 Index c = it.col();
425 Index ip = perm ? perm[i] : i;
426 if(Mode==(Upper|Lower))
427 count[StorageOrderMatch ? jp : ip]++;
428 else if(r==c)
429 count[ip]++;
430 else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
431 {
432 count[ip]++;
433 count[jp]++;
434 }
435 }
436 }
437 Index nnz = count.sum();
438
439 // reserve space
440 dest.resizeNonZeros(nnz);
441 dest.outerIndexPtr()[0] = 0;
442 for(Index j=0; j<size; ++j)
443 dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
444 for(Index j=0; j<size; ++j)
445 count[j] = dest.outerIndexPtr()[j];
446
447 // copy data
448 for(StorageIndex j = 0; j<size; ++j)
449 {
450 for(MatIterator it(matEval,j); it; ++it)
451 {
452 StorageIndex i = internal::convert_index<StorageIndex>(it.index());
453 Index r = it.row();
454 Index c = it.col();
455
456 StorageIndex jp = perm ? perm[j] : j;
457 StorageIndex ip = perm ? perm[i] : i;
458
459 if(Mode==(Upper|Lower))
460 {
461 Index k = count[StorageOrderMatch ? jp : ip]++;
462 dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
463 dest.valuePtr()[k] = it.value();
464 }
465 else if(r==c)
466 {
467 Index k = count[ip]++;
468 dest.innerIndexPtr()[k] = ip;
469 dest.valuePtr()[k] = it.value();
470 }
471 else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
472 {
473 if(!StorageOrderMatch)
474 std::swap(ip,jp);
475 Index k = count[jp]++;
476 dest.innerIndexPtr()[k] = ip;
477 dest.valuePtr()[k] = it.value();
478 k = count[ip]++;
479 dest.innerIndexPtr()[k] = jp;
480 dest.valuePtr()[k] = numext::conj(it.value());
481 }
482 }
483 }
484}
485
486template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
487void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
488{
489 typedef typename MatrixType::StorageIndex StorageIndex;
490 typedef typename MatrixType::Scalar Scalar;
491 SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
492 typedef Matrix<StorageIndex,Dynamic,1> VectorI;
493 typedef evaluator<MatrixType> MatEval;
494 typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
495
496 enum {
497 SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
498 StorageOrderMatch = int(SrcOrder) == int(DstOrder),
499 DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
500 SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
501 };
502
503 MatEval matEval(mat);
504
505 Index size = mat.rows();
506 VectorI count(size);
507 count.setZero();
508 dest.resize(size,size);
509 for(StorageIndex j = 0; j<size; ++j)
510 {
511 StorageIndex jp = perm ? perm[j] : j;
512 for(MatIterator it(matEval,j); it; ++it)
513 {
514 StorageIndex i = it.index();
515 if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
516 continue;
517
518 StorageIndex ip = perm ? perm[i] : i;
519 count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
520 }
521 }
522 dest.outerIndexPtr()[0] = 0;
523 for(Index j=0; j<size; ++j)
524 dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
525 dest.resizeNonZeros(dest.outerIndexPtr()[size]);
526 for(Index j=0; j<size; ++j)
527 count[j] = dest.outerIndexPtr()[j];
528
529 for(StorageIndex j = 0; j<size; ++j)
530 {
531
532 for(MatIterator it(matEval,j); it; ++it)
533 {
534 StorageIndex i = it.index();
535 if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
536 continue;
537
538 StorageIndex jp = perm ? perm[j] : j;
539 StorageIndex ip = perm? perm[i] : i;
540
541 Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
542 dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
543
544 if(!StorageOrderMatch) std::swap(ip,jp);
545 if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
546 dest.valuePtr()[k] = numext::conj(it.value());
547 else
548 dest.valuePtr()[k] = it.value();
549 }
550 }
551}

} // end namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

559template<typename MatrixType, int Mode>
560struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
561};

} // end namespace internal

565template<typename MatrixType,int Mode>
566class SparseSymmetricPermutationProduct
567 : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
568{
569 public:
570 typedef typename MatrixType::Scalar Scalar;
571 typedef typename MatrixType::StorageIndex StorageIndex;
572 enum {
573 RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
574 ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
575 };
576 protected:
577 typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
578 public:
579 typedef Matrix<StorageIndex,Dynamic,1> VectorI;
580 typedef typename MatrixType::Nested MatrixTypeNested;
581 typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;
582
583 SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
584 : m_matrix(mat), m_perm(perm)
585 {}
586
587 inline Index rows() const { return m_matrix.rows(); }
588 inline Index cols() const { return m_matrix.cols(); }
589
590 const NestedExpression& matrix() const { return m_matrix; }
591 const Perm& perm() const { return m_perm; }
592
593 protected:
594 MatrixTypeNested m_matrix;
595 const Perm& m_perm;
596
597};

namespace internal {

601template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
602struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
603{
604 typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
605 typedef typename DstXprType::StorageIndex DstIndex;
606 template<int Options>
607 static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
608 {
609 // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
610 SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
611 internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
612 dst = tmp;
613 }
614
615 template<typename DestType,unsigned int DestMode>
616 static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
617 {
618 internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
619 }
620};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
// Cross-reference notes recovered from the documentation extraction:
//  - EigenBase<Derived>::derived()                              (EigenBase.h:44)
//  - MatrixBase: base class for all dense matrices, vectors, and expressions (MatrixBase.h:50)
//  - Matrix: the matrix class, also used for vectors and row-vectors (Matrix.h:180)
//  - PermutationMatrix: permutation matrix                       (PermutationMatrix.h:309)
//  - Product: expression of the product of two arbitrary matrices or vectors (Product.h:75)
//  - SparseMatrixBase: base class of any sparse matrices or sparse expressions (SparseMatrixBase.h:28)
//  - SparseSelfAdjointView: pseudo expression to manipulate a triangular sparse
//    matrix as a selfadjoint matrix; its operator* overloads, rankUpdate and
//    twistedBy members are defined above in this file.
//  - Constants: Lower (Constants.h:204), Upper (Constants.h:206),
//    ColMajor (Constants.h:320), RowMajor (Constants.h:322),
//    RowMajorBit (Constants.h:61)
//  - Eigen::Index: EIGEN_DEFAULT_DENSE_INDEX_TYPE, the API index type (Meta.h:33);
//    EigenBase::Index is its interface alias (EigenBase.h:37)