Eigen 3.3.0
SparseMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5//
6// This Source Code Form is subject to the terms of the Mozilla
7// Public License v. 2.0. If a copy of the MPL was not distributed
8// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
10#ifndef EIGEN_SPARSEMATRIX_H
11#define EIGEN_SPARSEMATRIX_H
12
13namespace Eigen {
14
41namespace internal {
42template<typename _Scalar, int _Options, typename _Index>
43struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44{
45 typedef _Scalar Scalar;
46 typedef _Index StorageIndex;
47 typedef Sparse StorageKind;
48 typedef MatrixXpr XprKind;
49 enum {
50 RowsAtCompileTime = Dynamic,
51 ColsAtCompileTime = Dynamic,
52 MaxRowsAtCompileTime = Dynamic,
53 MaxColsAtCompileTime = Dynamic,
54 Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
55 SupportedAccessPatterns = InnerRandomAccessPattern
56 };
57};
58
59template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
60struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
61{
62 typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
63 typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
64 typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
65
66 typedef _Scalar Scalar;
67 typedef Dense StorageKind;
68 typedef _Index StorageIndex;
69 typedef MatrixXpr XprKind;
70
71 enum {
72 RowsAtCompileTime = Dynamic,
73 ColsAtCompileTime = 1,
74 MaxRowsAtCompileTime = Dynamic,
75 MaxColsAtCompileTime = 1,
76 Flags = LvalueBit
77 };
78};
79
80template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
81struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
82 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
83{
84 enum {
85 Flags = 0
86 };
87};
88
89} // end namespace internal
90
91template<typename _Scalar, int _Options, typename _Index>
92class SparseMatrix
93 : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
94{
95 typedef SparseCompressedBase<SparseMatrix> Base;
96 using Base::convert_index;
97 friend class SparseVector<_Scalar,0,_Index>;
98 public:
99 using Base::isCompressed;
100 using Base::nonZeros;
101 EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
102 using Base::operator+=;
103 using Base::operator-=;
104
105 typedef MappedSparseMatrix<Scalar,Flags> Map;
106 typedef Diagonal<SparseMatrix> DiagonalReturnType;
107 typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
108 typedef typename Base::InnerIterator InnerIterator;
109 typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
110
111
112 using Base::IsRowMajor;
113 typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
114 enum {
115 Options = _Options
116 };
117
118 typedef typename Base::IndexVector IndexVector;
119 typedef typename Base::ScalarVector ScalarVector;
120 protected:
121 typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
122
123 Index m_outerSize;
124 Index m_innerSize;
125 StorageIndex* m_outerIndex;
126 StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
127 Storage m_data;
128
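// Illustrative layout (not from the upstream file): with the default column-major storage,
// the 3x3 matrix
//   [ 1 0 2 ]
//   [ 0 0 3 ]
//   [ 4 5 6 ]
// is held in compressed mode as
//   values        (m_data): 1 4 5 2 3 6
//   inner indices (m_data): 0 2 2 0 1 2    (row index of each stored value)
//   m_outerIndex:           0 2 3 6        (start of each column in the two arrays above)
// In uncompressed mode, m_innerNonZeros[j] additionally records how many of the slots
// reserved for outer vector j are actually in use, leaving free room for fast insertions.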
129 public:
130
132 inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
134 inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
135
137 inline Index innerSize() const { return m_innerSize; }
139 inline Index outerSize() const { return m_outerSize; }
140
144 inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
148 inline Scalar* valuePtr() { return m_data.valuePtr(); }
149
153 inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
157 inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
158
162 inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
166 inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
167
171 inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
175 inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
176
178 inline Storage& data() { return m_data; }
180 inline const Storage& data() const { return m_data; }
181
184 inline Scalar coeff(Index row, Index col) const
185 {
186 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
187
188 const Index outer = IsRowMajor ? row : col;
189 const Index inner = IsRowMajor ? col : row;
190 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
191 return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
192 }
193
202 inline Scalar& coeffRef(Index row, Index col)
203 {
204 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
205
206 const Index outer = IsRowMajor ? row : col;
207 const Index inner = IsRowMajor ? col : row;
208
209 Index start = m_outerIndex[outer];
210 Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
211 eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
212 if(end<=start)
213 return insert(row,col);
214 const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
215 if((p<end) && (m_data.index(p)==inner))
216 return m_data.value(p);
217 else
218 return insert(row,col);
219 }
220
236 Scalar& insert(Index row, Index col);
237
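// A usage sketch (illustrative, assuming <Eigen/Sparse> is included):
//   SparseMatrix<double> A(10, 10);
//   A.insert(2, 3) = 1.0;       // creates entry (2,3); it must not exist yet
//   A.coeffRef(2, 3) += 0.5;    // binary search; inserts a zero first if the entry is absent
//   double v = A.coeff(2, 3);   // read-only: 1.5 here, and 0 for entries that are not stored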
238 public:
239
247 inline void setZero()
248 {
249 m_data.clear();
250 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
251 if(m_innerNonZeros)
252 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
253 }
254
258 inline void reserve(Index reserveSize)
259 {
260 eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
261 m_data.reserve(reserveSize);
262 }
263
264 #ifdef EIGEN_PARSED_BY_DOXYGEN
277 template<class SizesType>
278 inline void reserve(const SizesType& reserveSizes);
279 #else
280 template<class SizesType>
281 inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
282 #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
283 typename
284 #endif
285 SizesType::value_type())
286 {
287 EIGEN_UNUSED_VARIABLE(enableif);
288 reserveInnerVectors(reserveSizes);
289 }
290 #endif // EIGEN_PARSED_BY_DOXYGEN
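// A usage sketch (illustrative) of the per-inner-vector reservation above; SizesType can be
// any vector-like type providing operator[] and value_type, e.g. Eigen::VectorXi.
//   SparseMatrix<double> A(rows, cols);               // column-major by default
//   A.reserve(Eigen::VectorXi::Constant(cols, 6));    // ~6 nonzeros per column; switches to uncompressed mode
//   // ... fill with A.insert(i, j) = value; ...
//   A.makeCompressed();                               // optional: back to compressed storage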
291 protected:
292 template<class SizesType>
293 inline void reserveInnerVectors(const SizesType& reserveSizes)
294 {
295 if(isCompressed())
296 {
297 Index totalReserveSize = 0;
298 // turn the matrix into non-compressed mode
299 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
300 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
301
302 // temporarily use m_innerSizes to hold the new starting points.
303 StorageIndex* newOuterIndex = m_innerNonZeros;
304
305 StorageIndex count = 0;
306 for(Index j=0; j<m_outerSize; ++j)
307 {
308 newOuterIndex[j] = count;
309 count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
310 totalReserveSize += reserveSizes[j];
311 }
312 m_data.reserve(totalReserveSize);
313 StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
314 for(Index j=m_outerSize-1; j>=0; --j)
315 {
316 StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
317 for(Index i=innerNNZ-1; i>=0; --i)
318 {
319 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
320 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
321 }
322 previousOuterIndex = m_outerIndex[j];
323 m_outerIndex[j] = newOuterIndex[j];
324 m_innerNonZeros[j] = innerNNZ;
325 }
326 m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
327
328 m_data.resize(m_outerIndex[m_outerSize]);
329 }
330 else
331 {
332 StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
333 if (!newOuterIndex) internal::throw_std_bad_alloc();
334
335 StorageIndex count = 0;
336 for(Index j=0; j<m_outerSize; ++j)
337 {
338 newOuterIndex[j] = count;
339 StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
340 StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
341 count += toReserve + m_innerNonZeros[j];
342 }
343 newOuterIndex[m_outerSize] = count;
344
345 m_data.resize(count);
346 for(Index j=m_outerSize-1; j>=0; --j)
347 {
348 Index offset = newOuterIndex[j] - m_outerIndex[j];
349 if(offset>0)
350 {
351 StorageIndex innerNNZ = m_innerNonZeros[j];
352 for(Index i=innerNNZ-1; i>=0; --i)
353 {
354 m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
355 m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
356 }
357 }
358 }
359
360 std::swap(m_outerIndex, newOuterIndex);
361 std::free(newOuterIndex);
362 }
363
364 }
365 public:
366
367 //--- low level purely coherent filling ---
368
379 inline Scalar& insertBack(Index row, Index col)
380 {
381 return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
382 }
383
386 inline Scalar& insertBackByOuterInner(Index outer, Index inner)
387 {
388 eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
389 eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
390 Index p = m_outerIndex[outer+1];
391 ++m_outerIndex[outer+1];
392 m_data.append(Scalar(0), inner);
393 return m_data.value(p);
394 }
395
398 inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
399 {
400 Index p = m_outerIndex[outer+1];
401 ++m_outerIndex[outer+1];
402 m_data.append(Scalar(0), inner);
403 return m_data.value(p);
404 }
405
408 inline void startVec(Index outer)
409 {
410 eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
411 eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
412 m_outerIndex[outer+1] = m_outerIndex[outer];
413 }
414
418 inline void finalize()
419 {
420 if(isCompressed())
421 {
422 StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
423 Index i = m_outerSize;
424 // find the last filled column
425 while (i>=0 && m_outerIndex[i]==0)
426 --i;
427 ++i;
428 while (i<=m_outerSize)
429 {
430 m_outerIndex[i] = size;
431 ++i;
432 }
433 }
434 }
435
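// A sketch (illustrative) of the low-level, purely coherent filling API above: outer
// vectors must be started in increasing order and, within each one, inner indices must
// be inserted in increasing order. D stands for a hypothetical dense source matrix.
//   SparseMatrix<double> A(D.rows(), D.cols());
//   A.reserve(D.size()/10);                    // rough guess of the total nonzero count
//   for (Eigen::Index j = 0; j < D.cols(); ++j) {
//     A.startVec(j);
//     for (Eigen::Index i = 0; i < D.rows(); ++i)
//       if (D(i, j) != 0.0)
//         A.insertBack(i, j) = D(i, j);
//   }
//   A.finalize();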
436 //---
437
438 template<typename InputIterators>
439 void setFromTriplets(const InputIterators& begin, const InputIterators& end);
440
441 template<typename InputIterators,typename DupFunctor>
442 void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
443
444 void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
445
446 template<typename DupFunctor>
447 void collapseDuplicates(DupFunctor dup_func = DupFunctor());
448
449 //---
450
453 Scalar& insertByOuterInner(Index j, Index i)
454 {
455 return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
456 }
457
460 void makeCompressed()
461 {
462 if(isCompressed())
463 return;
464
465 eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
466
467 Index oldStart = m_outerIndex[1];
468 m_outerIndex[1] = m_innerNonZeros[0];
469 for(Index j=1; j<m_outerSize; ++j)
470 {
471 Index nextOldStart = m_outerIndex[j+1];
472 Index offset = oldStart - m_outerIndex[j];
473 if(offset>0)
474 {
475 for(Index k=0; k<m_innerNonZeros[j]; ++k)
476 {
477 m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
478 m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
479 }
480 }
481 m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
482 oldStart = nextOldStart;
483 }
484 std::free(m_innerNonZeros);
485 m_innerNonZeros = 0;
486 m_data.resize(m_outerIndex[m_outerSize]);
487 m_data.squeeze();
488 }
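// A sketch (illustrative): random insert() calls put the matrix in uncompressed mode;
// makeCompressed() then squeezes out the reserved-but-unused room, frees m_innerNonZeros
// and restores the plain compressed layout that most solver wrappers expect.
//   A.reserve(Eigen::VectorXi::Constant(A.cols(), 4));
//   A.insert(0, 1) = 2.0;
//   A.insert(2, 1) = 3.0;
//   A.makeCompressed();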
489
491 void uncompress()
492 {
493 if(m_innerNonZeros != 0)
494 return;
495 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
496 for (Index i = 0; i < m_outerSize; i++)
497 {
498 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
499 }
500 }
501
503 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
504 {
505 prune(default_prunning_func(reference,epsilon));
506 }
507
515 template<typename KeepFunc>
516 void prune(const KeepFunc& keep = KeepFunc())
517 {
518 // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
519 makeCompressed();
520
521 StorageIndex k = 0;
522 for(Index j=0; j<m_outerSize; ++j)
523 {
524 Index previousStart = m_outerIndex[j];
525 m_outerIndex[j] = k;
526 Index end = m_outerIndex[j+1];
527 for(Index i=previousStart; i<end; ++i)
528 {
529 if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
530 {
531 m_data.value(k) = m_data.value(i);
532 m_data.index(k) = m_data.index(i);
533 ++k;
534 }
535 }
536 }
537 m_outerIndex[m_outerSize] = k;
538 m_data.resize(k,0);
539 }
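// A sketch (illustrative): this overload keeps only the entries for which
// keep(row, col, value) returns true; the scalar overload further up keeps entries
// that are not much smaller than the given reference value.
//   A.prune([](const Eigen::Index&, const Eigen::Index&, const double& value) {
//     return std::abs(value) > 1e-12;          // drop explicit near-zeros
//   });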
540
549 void conservativeResize(Index rows, Index cols)
550 {
551 // No change
552 if (this->rows() == rows && this->cols() == cols) return;
553
554 // If one dimension is null, then there is nothing to be preserved
555 if(rows==0 || cols==0) return resize(rows,cols);
556
557 Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
558 Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
559 StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
560
561 // Deals with inner non zeros
562 if (m_innerNonZeros)
563 {
564 // Resize m_innerNonZeros
565 StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
566 if (!newInnerNonZeros) internal::throw_std_bad_alloc();
567 m_innerNonZeros = newInnerNonZeros;
568
569 for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
570 m_innerNonZeros[i] = 0;
571 }
572 else if (innerChange < 0)
573 {
574 // Inner size decreased: allocate a new m_innerNonZeros
575 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
576 if (!m_innerNonZeros) internal::throw_std_bad_alloc();
577 for(Index i = 0; i < m_outerSize; i++)
578 m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
579 }
580
581 // Change the m_innerNonZeros in case of a decrease of inner size
582 if (m_innerNonZeros && innerChange < 0)
583 {
584 for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585 {
586 StorageIndex &n = m_innerNonZeros[i];
587 StorageIndex start = m_outerIndex[i];
588 while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
589 }
590 }
591
592 m_innerSize = newInnerSize;
593
594 // Re-allocate outer index structure if necessary
595 if (outerChange == 0)
596 return;
597
598 StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
599 if (!newOuterIndex) internal::throw_std_bad_alloc();
600 m_outerIndex = newOuterIndex;
601 if (outerChange > 0)
602 {
603 StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
604 for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
605 m_outerIndex[i] = last;
606 }
607 m_outerSize += outerChange;
608 }
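// Note (illustrative): conservativeResize() keeps the stored entries that still fit
// into the new dimensions, whereas resize() below discards every entry.
//   A.conservativeResize(A.rows() + 1, A.cols());  // existing nonzeros are preserved
//   A.resize(5, 5);                                // empty 5x5 matrix, all nonzeros dropped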
609
617 void resize(Index rows, Index cols)
618 {
619 const Index outerSize = IsRowMajor ? rows : cols;
620 m_innerSize = IsRowMajor ? cols : rows;
621 m_data.clear();
622 if (m_outerSize != outerSize || m_outerSize==0)
623 {
624 std::free(m_outerIndex);
625 m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
626 if (!m_outerIndex) internal::throw_std_bad_alloc();
627
628 m_outerSize = outerSize;
629 }
630 if(m_innerNonZeros)
631 {
632 std::free(m_innerNonZeros);
633 m_innerNonZeros = 0;
634 }
635 memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
636 }
637
640 void resizeNonZeros(Index size)
641 {
642 m_data.resize(size);
643 }
644
646 const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
647
652 DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
653
655 inline SparseMatrix()
656 : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
657 {
658 check_template_parameters();
659 resize(0, 0);
660 }
661
663 inline SparseMatrix(Index rows, Index cols)
664 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
665 {
666 check_template_parameters();
667 resize(rows, cols);
668 }
669
671 template<typename OtherDerived>
672 SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
673 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674 {
675 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
676 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
677 check_template_parameters();
678 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
679 if (needToTranspose)
680 *this = other.derived();
681 else
682 {
683 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
684 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
685 #endif
686 internal::call_assignment_no_alias(*this, other.derived());
687 }
688 }
689
691 template<typename OtherDerived, unsigned int UpLo>
692 SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
693 : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
694 {
695 check_template_parameters();
696 Base::operator=(other);
697 }
698
700 inline SparseMatrix(const SparseMatrix& other)
701 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
702 {
703 check_template_parameters();
704 *this = other.derived();
705 }
706
708 template<typename OtherDerived>
709 SparseMatrix(const ReturnByValue<OtherDerived>& other)
710 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711 {
712 check_template_parameters();
713 initAssignment(other);
714 other.evalTo(*this);
715 }
716
718 template<typename OtherDerived>
719 explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
720 : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
721 {
722 check_template_parameters();
723 *this = other.derived();
724 }
725
728 inline void swap(SparseMatrix& other)
729 {
730 //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
731 std::swap(m_outerIndex, other.m_outerIndex);
732 std::swap(m_innerSize, other.m_innerSize);
733 std::swap(m_outerSize, other.m_outerSize);
734 std::swap(m_innerNonZeros, other.m_innerNonZeros);
735 m_data.swap(other.m_data);
736 }
737
740 inline void setIdentity()
741 {
742 eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
743 this->m_data.resize(rows());
744 Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
745 Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
746 Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
747 std::free(m_innerNonZeros);
748 m_innerNonZeros = 0;
749 }
750 inline SparseMatrix& operator=(const SparseMatrix& other)
751 {
752 if (other.isRValue())
753 {
754 swap(other.const_cast_derived());
755 }
756 else if(this!=&other)
757 {
758 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
759 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
760 #endif
761 initAssignment(other);
762 if(other.isCompressed())
763 {
764 internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
765 m_data = other.m_data;
766 }
767 else
768 {
769 Base::operator=(other);
770 }
771 }
772 return *this;
773 }
774
775#ifndef EIGEN_PARSED_BY_DOXYGEN
776 template<typename OtherDerived>
777 inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
778 { return Base::operator=(other.derived()); }
779#endif // EIGEN_PARSED_BY_DOXYGEN
780
781 template<typename OtherDerived>
782 EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
783
784 friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
785 {
786 EIGEN_DBG_SPARSE(
787 s << "Nonzero entries:\n";
788 if(m.isCompressed())
789 for (Index i=0; i<m.nonZeros(); ++i)
790 s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
791 else
792 for (Index i=0; i<m.outerSize(); ++i)
793 {
794 Index p = m.m_outerIndex[i];
795 Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
796 Index k=p;
797 for (; k<pe; ++k)
798 s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
799 for (; k<m.m_outerIndex[i+1]; ++k)
800 s << "(_,_) ";
801 }
802 s << std::endl;
803 s << std::endl;
804 s << "Outer pointers:\n";
805 for (Index i=0; i<m.outerSize(); ++i)
806 s << m.m_outerIndex[i] << " ";
807 s << " $" << std::endl;
808 if(!m.isCompressed())
809 {
810 s << "Inner non zeros:\n";
811 for (Index i=0; i<m.outerSize(); ++i)
812 s << m.m_innerNonZeros[i] << " ";
813 s << " $" << std::endl;
814 }
815 s << std::endl;
816 );
817 s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
818 return s;
819 }
820
822 inline ~SparseMatrix()
823 {
824 std::free(m_outerIndex);
825 std::free(m_innerNonZeros);
826 }
827
829 Scalar sum() const;
830
831# ifdef EIGEN_SPARSEMATRIX_PLUGIN
832# include EIGEN_SPARSEMATRIX_PLUGIN
833# endif
834
835protected:
836
837 template<typename Other>
838 void initAssignment(const Other& other)
839 {
840 resize(other.rows(), other.cols());
841 if(m_innerNonZeros)
842 {
843 std::free(m_innerNonZeros);
844 m_innerNonZeros = 0;
845 }
846 }
847
850 EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
851
854 class SingletonVector
855 {
856 StorageIndex m_index;
857 StorageIndex m_value;
858 public:
859 typedef StorageIndex value_type;
860 SingletonVector(Index i, Index v)
861 : m_index(convert_index(i)), m_value(convert_index(v))
862 {}
863
864 StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
865 };
866
869 EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
870
871public:
874 EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
875 {
876 const Index outer = IsRowMajor ? row : col;
877 const Index inner = IsRowMajor ? col : row;
878
879 eigen_assert(!isCompressed());
880 eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
881
882 Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
883 m_data.index(p) = convert_index(inner);
884 return (m_data.value(p) = 0);
885 }
886
887private:
888 static void check_template_parameters()
889 {
890 EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
891 EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
892 }
893
894 struct default_prunning_func {
895 default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
896 inline bool operator() (const Index&, const Index&, const Scalar& value) const
897 {
898 return !internal::isMuchSmallerThan(value, reference, epsilon);
899 }
900 Scalar reference;
901 RealScalar epsilon;
902 };
903};
904
905namespace internal {
906
907template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
908void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
909{
910 enum { IsRowMajor = SparseMatrixType::IsRowMajor };
911 typedef typename SparseMatrixType::Scalar Scalar;
912 typedef typename SparseMatrixType::StorageIndex StorageIndex;
913 SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
914
915 if(begin!=end)
916 {
917 // pass 1: count the nnz per inner-vector
918 typename SparseMatrixType::IndexVector wi(trMat.outerSize());
919 wi.setZero();
920 for(InputIterator it(begin); it!=end; ++it)
921 {
922 eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
923 wi(IsRowMajor ? it->col() : it->row())++;
924 }
925
926 // pass 2: insert all the elements into trMat
927 trMat.reserve(wi);
928 for(InputIterator it(begin); it!=end; ++it)
929 trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
930
931 // pass 3:
932 trMat.collapseDuplicates(dup_func);
933 }
934
935 // pass 4: transposed copy -> implicit sorting
936 mat = trMat;
937}
938
939}
940
941
979template<typename Scalar, int _Options, typename _Index>
980template<typename InputIterators>
981void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
982{
983 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
984}
985
995template<typename Scalar, int _Options, typename _Index>
996template<typename InputIterators,typename DupFunctor>
997void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
998{
999 internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
1000}
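// A usage sketch (illustrative): the iterator range only needs to expose row(), col() and
// value(); it does not have to be sorted and may contain duplicates.
//   std::vector<Eigen::Triplet<double> > triplets;
//   triplets.push_back(Eigen::Triplet<double>(0, 1, 3.0));
//   triplets.push_back(Eigen::Triplet<double>(2, 0, 1.5));
//   triplets.push_back(Eigen::Triplet<double>(0, 1, 2.0));   // duplicate of (0,1)
//   SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(triplets.begin(), triplets.end());     // duplicates are summed: A.coeff(0,1) == 5
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double&, const double& b) { return b; });  // keep the last duplicate instead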
1001
1003template<typename Scalar, int _Options, typename _Index>
1004template<typename DupFunctor>
1005void SparseMatrix<Scalar,_Options,_Index>::collapseDuplicates(DupFunctor dup_func)
1006{
1007 eigen_assert(!isCompressed());
1008 // TODO, in practice we should be able to use m_innerNonZeros for that task
1009 IndexVector wi(innerSize());
1010 wi.fill(-1);
1011 StorageIndex count = 0;
1012 // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1013 for(Index j=0; j<outerSize(); ++j)
1014 {
1015 StorageIndex start = count;
1016 Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1017 for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1018 {
1019 Index i = m_data.index(k);
1020 if(wi(i)>=start)
1021 {
1022 // we have already met this entry => accumulate it
1023 m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1024 }
1025 else
1026 {
1027 m_data.value(count) = m_data.value(k);
1028 m_data.index(count) = m_data.index(k);
1029 wi(i) = count;
1030 ++count;
1031 }
1032 }
1033 m_outerIndex[j] = start;
1034 }
1035 m_outerIndex[m_outerSize] = count;
1036
1037 // turn the matrix into compressed form
1038 std::free(m_innerNonZeros);
1039 m_innerNonZeros = 0;
1040 m_data.resize(m_outerIndex[m_outerSize]);
1041}
1042
1043template<typename Scalar, int _Options, typename _Index>
1044template<typename OtherDerived>
1045EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1046{
1047 EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1048 YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1049
1050 #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1051 EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1052 #endif
1053
1054 const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1055 if (needToTranspose)
1056 {
1057 #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1058 EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1059 #endif
1060 // two passes algorithm:
1061 // 1 - compute the number of coeffs per dest inner vector
1062 // 2 - do the actual copy/eval
1063 // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1064 typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1065 typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1066 typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1067 OtherCopy otherCopy(other.derived());
1068 OtherCopyEval otherCopyEval(otherCopy);
1069
1070 SparseMatrix dest(other.rows(),other.cols());
1071 Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1072
1073 // pass 1
1074 // FIXME the above copy could be merged with that pass
1075 for (Index j=0; j<otherCopy.outerSize(); ++j)
1076 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1077 ++dest.m_outerIndex[it.index()];
1078
1079 // prefix sum
1080 StorageIndex count = 0;
1081 IndexVector positions(dest.outerSize());
1082 for (Index j=0; j<dest.outerSize(); ++j)
1083 {
1084 StorageIndex tmp = dest.m_outerIndex[j];
1085 dest.m_outerIndex[j] = count;
1086 positions[j] = count;
1087 count += tmp;
1088 }
1089 dest.m_outerIndex[dest.outerSize()] = count;
1090 // alloc
1091 dest.m_data.resize(count);
1092 // pass 2
1093 for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1094 {
1095 for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1096 {
1097 Index pos = positions[it.index()]++;
1098 dest.m_data.index(pos) = j;
1099 dest.m_data.value(pos) = it.value();
1100 }
1101 }
1102 this->swap(dest);
1103 return *this;
1104 }
1105 else
1106 {
1107 if(other.isRValue())
1108 {
1109 initAssignment(other.derived());
1110 }
1111 // there is no special optimization
1112 return Base::operator=(other.derived());
1113 }
1114}
1115
1116template<typename _Scalar, int _Options, typename _Index>
1117typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
1118{
1119 eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1120
1121 const Index outer = IsRowMajor ? row : col;
1122 const Index inner = IsRowMajor ? col : row;
1123
1124 if(isCompressed())
1125 {
1126 if(nonZeros()==0)
1127 {
1128 // reserve space if not already done
1129 if(m_data.allocatedSize()==0)
1130 m_data.reserve(2*m_innerSize);
1131
1132 // turn the matrix into non-compressed mode
1133 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1134 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1135
1136 memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1137
1138 // pack all inner-vectors to the end of the pre-allocated space
1139 // and allocate the entire free-space to the first inner-vector
1140 StorageIndex end = convert_index(m_data.allocatedSize());
1141 for(Index j=1; j<=m_outerSize; ++j)
1142 m_outerIndex[j] = end;
1143 }
1144 else
1145 {
1146 // turn the matrix into non-compressed mode
1147 m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1148 if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1149 for(Index j=0; j<m_outerSize; ++j)
1150 m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1151 }
1152 }
1153
1154 // check whether we can do a fast "push back" insertion
1155 Index data_end = m_data.allocatedSize();
1156
1157 // First case: we are filling a new inner vector which is packed at the end.
1158 // We assume that all remaining inner-vectors are also empty and packed to the end.
1159 if(m_outerIndex[outer]==data_end)
1160 {
1161 eigen_internal_assert(m_innerNonZeros[outer]==0);
1162
1163 // pack previous empty inner-vectors to end of the used-space
1164 // and allocate the entire free-space to the current inner-vector.
1165 StorageIndex p = convert_index(m_data.size());
1166 Index j = outer;
1167 while(j>=0 && m_innerNonZeros[j]==0)
1168 m_outerIndex[j--] = p;
1169
1170 // push back the new element
1171 ++m_innerNonZeros[outer];
1172 m_data.append(Scalar(0), inner);
1173
1174 // check for reallocation
1175 if(data_end != m_data.allocatedSize())
1176 {
1177 // m_data has been reallocated
1178 // -> move remaining inner-vectors back to the end of the free-space
1179 // so that the entire free-space is allocated to the current inner-vector.
1180 eigen_internal_assert(data_end < m_data.allocatedSize());
1181 StorageIndex new_end = convert_index(m_data.allocatedSize());
1182 for(Index k=outer+1; k<=m_outerSize; ++k)
1183 if(m_outerIndex[k]==data_end)
1184 m_outerIndex[k] = new_end;
1185 }
1186 return m_data.value(p);
1187 }
1188
1189 // Second case: the next inner-vector is packed to the end
1190 // and the current inner-vector end match the used-space.
1191 if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1192 {
1193 eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1194
1195 // add space for the new element
1196 ++m_innerNonZeros[outer];
1197 m_data.resize(m_data.size()+1);
1198
1199 // check for reallocation
1200 if(data_end != m_data.allocatedSize())
1201 {
1202 // m_data has been reallocated
1203 // -> move remaining inner-vectors back to the end of the free-space
1204 // so that the entire free-space is allocated to the current inner-vector.
1205 eigen_internal_assert(data_end < m_data.allocatedSize());
1206 StorageIndex new_end = convert_index(m_data.allocatedSize());
1207 for(Index k=outer+1; k<=m_outerSize; ++k)
1208 if(m_outerIndex[k]==data_end)
1209 m_outerIndex[k] = new_end;
1210 }
1211
1212 // and insert it at the right position (sorted insertion)
1213 Index startId = m_outerIndex[outer];
1214 Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1215 while ( (p > startId) && (m_data.index(p-1) > inner) )
1216 {
1217 m_data.index(p) = m_data.index(p-1);
1218 m_data.value(p) = m_data.value(p-1);
1219 --p;
1220 }
1221
1222 m_data.index(p) = convert_index(inner);
1223 return (m_data.value(p) = 0);
1224 }
1225
1226 if(m_data.size() != m_data.allocatedSize())
1227 {
1228 // make sure the matrix is compatible to random un-compressed insertion:
1229 m_data.resize(m_data.allocatedSize());
1230 this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1231 }
1232
1233 return insertUncompressed(row,col);
1234}
1235
1236template<typename _Scalar, int _Options, typename _Index>
1237EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1238{
1239 eigen_assert(!isCompressed());
1240
1241 const Index outer = IsRowMajor ? row : col;
1242 const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1243
1244 Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1245 StorageIndex innerNNZ = m_innerNonZeros[outer];
1246 if(innerNNZ>=room)
1247 {
1248 // this inner vector is full, we need to reallocate the whole buffer :(
1249 reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1250 }
1251
1252 Index startId = m_outerIndex[outer];
1253 Index p = startId + m_innerNonZeros[outer];
1254 while ( (p > startId) && (m_data.index(p-1) > inner) )
1255 {
1256 m_data.index(p) = m_data.index(p-1);
1257 m_data.value(p) = m_data.value(p-1);
1258 --p;
1259 }
1260 eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1261
1262 m_innerNonZeros[outer]++;
1263
1264 m_data.index(p) = inner;
1265 return (m_data.value(p) = 0);
1266}
1267
1268template<typename _Scalar, int _Options, typename _Index>
1269EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1270{
1271 eigen_assert(isCompressed());
1272
1273 const Index outer = IsRowMajor ? row : col;
1274 const Index inner = IsRowMajor ? col : row;
1275
1276 Index previousOuter = outer;
1277 if (m_outerIndex[outer+1]==0)
1278 {
1279 // we start a new inner vector
1280 while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1281 {
1282 m_outerIndex[previousOuter] = convert_index(m_data.size());
1283 --previousOuter;
1284 }
1285 m_outerIndex[outer+1] = m_outerIndex[outer];
1286 }
1287
1288 // here we have to handle the tricky case where the outerIndex array
1289 // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1290 // the 2nd inner vector...
1291 bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1292 && (size_t(m_outerIndex[outer+1]) == m_data.size());
1293
1294 size_t startId = m_outerIndex[outer];
1295 // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1296 size_t p = m_outerIndex[outer+1];
1297 ++m_outerIndex[outer+1];
1298
1299 double reallocRatio = 1;
1300 if (m_data.allocatedSize()<=m_data.size())
1301 {
1302 // if there is no preallocated memory, let's reserve a minimum of 32 elements
1303 if (m_data.size()==0)
1304 {
1305 m_data.reserve(32);
1306 }
1307 else
1308 {
1309 // we need to reallocate the data, to reduce multiple reallocations
1310 // we use a smart resize algorithm based on the current filling ratio
1311 // in addition, we use double to avoid integers overflows
1312 double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1313 reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1314 // furthermore we bound the realloc ratio to:
1315 // 1) reduce multiple minor realloc when the matrix is almost filled
1316 // 2) avoid to allocate too much memory when the matrix is almost empty
1317 reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1318 }
1319 }
1320 m_data.resize(m_data.size()+1,reallocRatio);
1321
1322 if (!isLastVec)
1323 {
1324 if (previousOuter==-1)
1325 {
1326 // oops wrong guess.
1327 // let's correct the outer offsets
1328 for (Index k=0; k<=(outer+1); ++k)
1329 m_outerIndex[k] = 0;
1330 Index k=outer+1;
1331 while(m_outerIndex[k]==0)
1332 m_outerIndex[k++] = 1;
1333 while (k<=m_outerSize && m_outerIndex[k]!=0)
1334 m_outerIndex[k++]++;
1335 p = 0;
1336 --k;
1337 k = m_outerIndex[k]-1;
1338 while (k>0)
1339 {
1340 m_data.index(k) = m_data.index(k-1);
1341 m_data.value(k) = m_data.value(k-1);
1342 k--;
1343 }
1344 }
1345 else
1346 {
1347 // we are not inserting into the last inner vec
1348 // update outer indices:
1349 Index j = outer+2;
1350 while (j<=m_outerSize && m_outerIndex[j]!=0)
1351 m_outerIndex[j++]++;
1352 --j;
1353 // shift data of last vecs:
1354 Index k = m_outerIndex[j]-1;
1355 while (k>=Index(p))
1356 {
1357 m_data.index(k) = m_data.index(k-1);
1358 m_data.value(k) = m_data.value(k-1);
1359 k--;
1360 }
1361 }
1362 }
1363
1364 while ( (p > startId) && (m_data.index(p-1) > inner) )
1365 {
1366 m_data.index(p) = m_data.index(p-1);
1367 m_data.value(p) = m_data.value(p-1);
1368 --p;
1369 }
1370
1371 m_data.index(p) = inner;
1372 return (m_data.value(p) = 0);
1373}
1374
1375namespace internal {
1376
1377template<typename _Scalar, int _Options, typename _Index>
1378struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
1379 : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
1380{
1381 typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
1382 typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
1383 evaluator() : Base() {}
1384 explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1385};
1386
1387}
1388
1389} // end namespace Eigen
1390
1391#endif // EIGEN_SPARSEMATRIX_H