// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \param MatrixType the type of the sparse matrix storing the coefficients
  * \param Mode can be either \c #Lower or \c #Upper
  *
  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
  * and most of the time this is the only way that it is used.
  *
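  * A minimal usage sketch (the matrix \c A, the vectors and their sizes are
  * assumptions made for illustration only):
  * \code
  * SparseMatrix<double> A(n,n);   // only the lower triangular part needs to be stored
  * VectorXd x(n), y(n);
  * // view the lower triangle of A as a full selfadjoint (symmetric) matrix:
  * y = A.selfadjointView<Lower>() * x;
  * \endcode
  *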
  * \sa SparseMatrixBase::selfadjointView()
  */
namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

} // end namespace internal

template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
      *
      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
      *
      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView>
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }
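
    // A short sketch of the two overloads above (B, C and D are illustrative
    // assumptions, not part of this file); both orderings go through the same
    // Product expression, and the selfadjoint view is first expanded into a
    // temporary full SparseMatrix:
    //   SparseMatrix<double> C = A.selfadjointView<Lower>() * B;
    //   SparseMatrix<double> D = B * A.selfadjointView<Lower>();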

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView,OtherDerived>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
    template<typename OtherDerived> friend
    Product<OtherDerived,SparseSelfAdjointView>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
      *
      * \returns a reference to \c *this
      *
      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
      * call this function with u.adjoint().
      */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
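
    // For instance (a sketch; U and the scaling factor are assumptions made
    // for illustration), accumulating 2*U*U^* into the stored triangle of A:
    //   SparseMatrix<double> U(n,k);
    //   A.selfadjointView<Lower>().rankUpdate(U, 2.0);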

    /** \returns an expression of \f$ P H P^{-1} \f$ */
    // TODO implement twists in a more evaluator friendly fashion
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
    }
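
    // Typical usage sketch (perm and B are illustrative assumptions): apply a
    // symmetric permutation and store the result back into a triangular part:
    //   PermutationMatrix<Dynamic,Dynamic,int> perm = ...;
    //   SparseMatrix<double> B(n,n);
    //   B.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(perm);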

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)

    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfAdjointView::resize() does not actually allow resizing.");
    }

  protected:

    MatrixTypeNested m_matrix;
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
//      in the future selfadjoint-ness should be defined by the expression traits
//      such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

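// Tag type used to dispatch the assignment of a sparse selfadjoint view to a
// plain sparse matrix (see the Assignment specialization below).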
struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the next two overloads could be reduced to:
  template<typename DestScalar,int StorageOrder,typename AssignFunc>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    // TODO directly evaluate into dst;
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};

} // end namespace internal

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix
***************************************************************************/

namespace internal {

template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
              ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };
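
  // Only the stored triangular half is traversed: each stored off-diagonal
  // coefficient a_ij contributes once directly (to row j of the result) and
  // once conjugated (to row i), restoring the full selfadjoint matrix on the
  // fly. Whether the stored entries of an outer vector come before the
  // diagonal ("first half") or after it ("second half") depends on both Mode
  // and the storage order, hence the two flags above.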

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // work on one column at a time
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);
      // handle diagonal coeff
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
          ++i;
        }
      }

      // premultiplied rhs for scatters
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // accumulator for partial scalar product
      typename DenseResType::Scalar res_j(0);
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs.coeff(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res.coeffRef(j,k) += alpha * res_j;

      // handle diagonal coeff
      if (ProcessFirstHalf && i && (i.index()==j))
        res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
    }
  }
}


template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything: compute dst = lhs * rhsView as dst^T = rhsView^T * lhs^T
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
394     generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
395   }
396 
397 protected:
398   typename Rhs::PlainObject m_lhs;
399   PlainObject m_result;
400 };
401 
402 template<typename Lhs, typename RhsView, int ProductTag>
403 struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
404   : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
405 {
406   typedef Product<Lhs, RhsView, DefaultProduct> XprType;
407   typedef typename XprType::PlainObject PlainObject;
408   typedef evaluator<PlainObject> Base;
409 
410   product_evaluator(const XprType& xpr)
411     : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
412   {
413     ::new (static_cast<Base*>(this)) Base(m_result);
414     generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
415   }
416 
417 protected:
418   typename Lhs::PlainObject m_rhs;
419   PlainObject m_result;
420 };
421 
422 } // namespace internal
423 
/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
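  // first pass: count the number of entries of each destination outer vector,
  // applying the permutation; every stored off-diagonal coefficient of the
  // source triangle produces two entries in the full destination matrix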
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==int(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space and build the outer index (shifted cumulative sum of the counts)
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==int(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };
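
  // A row-major upper (resp. lower) triangle stores the same set of entries
  // as a column-major lower (resp. upper) triangle, so the modes are flipped
  // above in order to reason in a single, column-major-like convention.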

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}

} // end namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

} // end namespace internal

template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;
};

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
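
  // assignment to a plain sparse matrix: expand the permuted triangle into a
  // full sparse matrix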
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

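  // assignment to another selfadjoint view: permute the stored triangle
  // directly into the destination triangle, without the full expansion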
  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H