xref: /aosp_15_r20/external/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h (revision bf2c37156dfe67e5dfebd6d394bad8b2ab5804d4)
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2010 Gael Guennebaud <[email protected]>
5 // Copyright (C) 2010 Jitse Niesen <[email protected]>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 
11 #ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
12 #define EIGEN_SELFADJOINTEIGENSOLVER_H
13 
14 #include "./Tridiagonalization.h"
15 
16 namespace Eigen {
17 
// Forward declaration: the generalized solver (A v = lambda B v) reuses this
// class and is defined in a separate header.
template<typename _MatrixType>
class GeneralizedSelfAdjointEigenSolver;

namespace internal {
// Dispatcher for the closed-form (direct) algorithms; specialized below for
// real 2x2 and 3x3 matrices, with a generic fallback to the iterative solver.
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;

// Shared QR-iteration kernel working on a symmetric tridiagonal matrix stored
// as a diagonal/sub-diagonal vector pair; defined later in this file.
template<typename MatrixType, typename DiagType, typename SubDiagType>
EIGEN_DEVICE_FUNC
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
}
28 
29 /** \eigenvalues_module \ingroup Eigenvalues_Module
30   *
31   *
32   * \class SelfAdjointEigenSolver
33   *
34   * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
35   *
36   * \tparam _MatrixType the type of the matrix of which we are computing the
37   * eigendecomposition; this is expected to be an instantiation of the Matrix
38   * class template.
39   *
40   * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
41   * matrices, this means that the matrix is symmetric: it equals its
42   * transpose. This class computes the eigenvalues and eigenvectors of a
43   * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
44   * \f$ v \f$ such that \f$ Av = \lambda v \f$.  The eigenvalues of a
45   * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
46   * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
47   * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$. This is called the
48   * eigendecomposition.
49   *
50   * For a selfadjoint matrix, \f$ V \f$ is unitary, meaning its inverse is equal
51   * to its adjoint, \f$ V^{-1} = V^{\dagger} \f$. If \f$ A \f$ is real, then
52   * \f$ V \f$ is also real and therefore orthogonal, meaning its inverse is
53   * equal to its transpose, \f$ V^{-1} = V^T \f$.
54   *
55   * The algorithm exploits the fact that the matrix is selfadjoint, making it
56   * faster and more accurate than the general purpose eigenvalue algorithms
57   * implemented in EigenSolver and ComplexEigenSolver.
58   *
59   * Only the \b lower \b triangular \b part of the input matrix is referenced.
60   *
61   * Call the function compute() to compute the eigenvalues and eigenvectors of
62   * a given matrix. Alternatively, you can use the
63   * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
64   * the eigenvalues and eigenvectors at construction time. Once the eigenvalue
65   * and eigenvectors are computed, they can be retrieved with the eigenvalues()
66   * and eigenvectors() functions.
67   *
68   * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
69   * contains an example of the typical use of this class.
70   *
71   * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
72   * the likes, see the class GeneralizedSelfAdjointEigenSolver.
73   *
74   * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
75   */
76 template<typename _MatrixType> class SelfAdjointEigenSolver
77 {
78   public:
79 
80     typedef _MatrixType MatrixType;
81     enum {
82       Size = MatrixType::RowsAtCompileTime,
83       ColsAtCompileTime = MatrixType::ColsAtCompileTime,
84       Options = MatrixType::Options,
85       MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
86     };
87 
88     /** \brief Scalar type for matrices of type \p _MatrixType. */
89     typedef typename MatrixType::Scalar Scalar;
90     typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
91 
92     typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
93 
94     /** \brief Real scalar type for \p _MatrixType.
95       *
96       * This is just \c Scalar if #Scalar is real (e.g., \c float or
97       * \c double), and the type of the real part of \c Scalar if #Scalar is
98       * complex.
99       */
100     typedef typename NumTraits<Scalar>::Real RealScalar;
101 
102     friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;
103 
104     /** \brief Type for vector of eigenvalues as returned by eigenvalues().
105       *
106       * This is a column vector with entries of type #RealScalar.
107       * The length of the vector is the size of \p _MatrixType.
108       */
109     typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
110     typedef Tridiagonalization<MatrixType> TridiagonalizationType;
111     typedef typename TridiagonalizationType::SubDiagonalType SubDiagonalType;
112 
113     /** \brief Default constructor for fixed-size matrices.
114       *
115       * The default constructor is useful in cases in which the user intends to
116       * perform decompositions via compute(). This constructor
117       * can only be used if \p _MatrixType is a fixed-size matrix; use
118       * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
119       *
120       * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
121       * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
122       */
123     EIGEN_DEVICE_FUNC
124     SelfAdjointEigenSolver()
125         : m_eivec(),
126           m_eivalues(),
127           m_subdiag(),
128           m_hcoeffs(),
129           m_info(InvalidInput),
130           m_isInitialized(false),
131           m_eigenvectorsOk(false)
132     { }
133 
134     /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
135       *
136       * \param [in]  size  Positive integer, size of the matrix whose
137       * eigenvalues and eigenvectors will be computed.
138       *
139       * This constructor is useful for dynamic-size matrices, when the user
140       * intends to perform decompositions via compute(). The \p size
141       * parameter is only used as a hint. It is not an error to give a wrong
142       * \p size, but it may impair performance.
143       *
144       * \sa compute() for an example
145       */
146     EIGEN_DEVICE_FUNC
147     explicit SelfAdjointEigenSolver(Index size)
148         : m_eivec(size, size),
149           m_eivalues(size),
150           m_subdiag(size > 1 ? size - 1 : 1),
151           m_hcoeffs(size > 1 ? size - 1 : 1),
152           m_isInitialized(false),
153           m_eigenvectorsOk(false)
154     {}
155 
156     /** \brief Constructor; computes eigendecomposition of given matrix.
157       *
158       * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
159       *    be computed. Only the lower triangular part of the matrix is referenced.
160       * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
161       *
162       * This constructor calls compute(const MatrixType&, int) to compute the
163       * eigenvalues of the matrix \p matrix. The eigenvectors are computed if
164       * \p options equals #ComputeEigenvectors.
165       *
166       * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
167       * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
168       *
169       * \sa compute(const MatrixType&, int)
170       */
171     template<typename InputType>
172     EIGEN_DEVICE_FUNC
173     explicit SelfAdjointEigenSolver(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors)
174       : m_eivec(matrix.rows(), matrix.cols()),
175         m_eivalues(matrix.cols()),
176         m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
177         m_hcoeffs(matrix.cols() > 1 ? matrix.cols() - 1 : 1),
178         m_isInitialized(false),
179         m_eigenvectorsOk(false)
180     {
181       compute(matrix.derived(), options);
182     }
183 
184     /** \brief Computes eigendecomposition of given matrix.
185       *
186       * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
187       *    be computed. Only the lower triangular part of the matrix is referenced.
188       * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
189       * \returns    Reference to \c *this
190       *
191       * This function computes the eigenvalues of \p matrix.  The eigenvalues()
192       * function can be used to retrieve them.  If \p options equals #ComputeEigenvectors,
193       * then the eigenvectors are also computed and can be retrieved by
194       * calling eigenvectors().
195       *
196       * This implementation uses a symmetric QR algorithm. The matrix is first
197       * reduced to tridiagonal form using the Tridiagonalization class. The
198       * tridiagonal matrix is then brought to diagonal form with implicit
199       * symmetric QR steps with Wilkinson shift. Details can be found in
200       * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
201       *
202       * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
203       * are required and \f$ 4n^3/3 \f$ if they are not required.
204       *
205       * This method reuses the memory in the SelfAdjointEigenSolver object that
206       * was allocated when the object was constructed, if the size of the
207       * matrix does not change.
208       *
209       * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
210       * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
211       *
212       * \sa SelfAdjointEigenSolver(const MatrixType&, int)
213       */
214     template<typename InputType>
215     EIGEN_DEVICE_FUNC
216     SelfAdjointEigenSolver& compute(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors);
217 
218     /** \brief Computes eigendecomposition of given matrix using a closed-form algorithm
219       *
220       * This is a variant of compute(const MatrixType&, int options) which
221       * directly solves the underlying polynomial equation.
222       *
223       * Currently only 2x2 and 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d).
224       *
225       * This method is usually significantly faster than the QR iterative algorithm
226       * but it might also be less accurate. It is also worth noting that
227       * for 3x3 matrices it involves trigonometric operations which are
228       * not necessarily available for all scalar types.
229       *
230       * For the 3x3 case, we observed the following worst case relative error regarding the eigenvalues:
231       *   - double: 1e-8
232       *   - float:  1e-3
233       *
234       * \sa compute(const MatrixType&, int options)
235       */
236     EIGEN_DEVICE_FUNC
237     SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);
238 
239     /**
240       *\brief Computes the eigen decomposition from a tridiagonal symmetric matrix
241       *
242       * \param[in] diag The vector containing the diagonal of the matrix.
243       * \param[in] subdiag The subdiagonal of the matrix.
244       * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
245       * \returns Reference to \c *this
246       *
247       * This function assumes that the matrix has been reduced to tridiagonal form.
248       *
249       * \sa compute(const MatrixType&, int) for more information
250       */
251     SelfAdjointEigenSolver& computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options=ComputeEigenvectors);
252 
253     /** \brief Returns the eigenvectors of given matrix.
254       *
255       * \returns  A const reference to the matrix whose columns are the eigenvectors.
256       *
257       * \pre The eigenvectors have been computed before.
258       *
259       * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
260       * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
261       * eigenvectors are normalized to have (Euclidean) norm equal to one. If
262       * this object was used to solve the eigenproblem for the selfadjoint
263       * matrix \f$ A \f$, then the matrix returned by this function is the
264       * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
265       *
266       * For a selfadjoint matrix, \f$ V \f$ is unitary, meaning its inverse is equal
267       * to its adjoint, \f$ V^{-1} = V^{\dagger} \f$. If \f$ A \f$ is real, then
268       * \f$ V \f$ is also real and therefore orthogonal, meaning its inverse is
269       * equal to its transpose, \f$ V^{-1} = V^T \f$.
270       *
271       * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
272       * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
273       *
274       * \sa eigenvalues()
275       */
276     EIGEN_DEVICE_FUNC
277     const EigenvectorsType& eigenvectors() const
278     {
279       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
280       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
281       return m_eivec;
282     }
283 
284     /** \brief Returns the eigenvalues of given matrix.
285       *
286       * \returns A const reference to the column vector containing the eigenvalues.
287       *
288       * \pre The eigenvalues have been computed before.
289       *
290       * The eigenvalues are repeated according to their algebraic multiplicity,
291       * so there are as many eigenvalues as rows in the matrix. The eigenvalues
292       * are sorted in increasing order.
293       *
294       * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
295       * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
296       *
297       * \sa eigenvectors(), MatrixBase::eigenvalues()
298       */
299     EIGEN_DEVICE_FUNC
300     const RealVectorType& eigenvalues() const
301     {
302       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
303       return m_eivalues;
304     }
305 
306     /** \brief Computes the positive-definite square root of the matrix.
307       *
308       * \returns the positive-definite square root of the matrix
309       *
310       * \pre The eigenvalues and eigenvectors of a positive-definite matrix
311       * have been computed before.
312       *
313       * The square root of a positive-definite matrix \f$ A \f$ is the
314       * positive-definite matrix whose square equals \f$ A \f$. This function
315       * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
316       * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
317       *
318       * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
319       * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
320       *
321       * \sa operatorInverseSqrt(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
322       */
323     EIGEN_DEVICE_FUNC
324     MatrixType operatorSqrt() const
325     {
326       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
327       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
328       return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
329     }
330 
331     /** \brief Computes the inverse square root of the matrix.
332       *
333       * \returns the inverse positive-definite square root of the matrix
334       *
335       * \pre The eigenvalues and eigenvectors of a positive-definite matrix
336       * have been computed before.
337       *
338       * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
339       * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
340       * cheaper than first computing the square root with operatorSqrt() and
341       * then its inverse with MatrixBase::inverse().
342       *
343       * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
344       * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
345       *
346       * \sa operatorSqrt(), MatrixBase::inverse(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
347       */
348     EIGEN_DEVICE_FUNC
349     MatrixType operatorInverseSqrt() const
350     {
351       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
352       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
353       return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
354     }
355 
356     /** \brief Reports whether previous computation was successful.
357       *
358       * \returns \c Success if computation was successful, \c NoConvergence otherwise.
359       */
360     EIGEN_DEVICE_FUNC
361     ComputationInfo info() const
362     {
363       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
364       return m_info;
365     }
366 
367     /** \brief Maximum number of iterations.
368       *
369       * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
370       * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
371       */
372     static const int m_maxIterations = 30;
373 
374   protected:
375     static EIGEN_DEVICE_FUNC
376     void check_template_parameters()
377     {
378       EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
379     }
380 
381     EigenvectorsType m_eivec;
382     RealVectorType m_eivalues;
383     typename TridiagonalizationType::SubDiagonalType m_subdiag;
384     typename TridiagonalizationType::CoeffVectorType m_hcoeffs;
385     ComputationInfo m_info;
386     bool m_isInitialized;
387     bool m_eigenvectorsOk;
388 };
389 
namespace internal {
/** \internal
  *
  * \eigenvalues_module \ingroup Eigenvalues_Module
  *
  * Performs a QR step on a tridiagonal symmetric matrix represented as a
  * pair of two vectors \a diag and \a subdiag.
  *
  * \param diag the diagonal part of the input selfadjoint tridiagonal matrix
  * \param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix
  * \param start starting index of the submatrix to work on
  * \param end last+1 index of the submatrix to work on
  * \param matrixQ pointer to the column-major matrix holding the eigenvectors, can be 0
  * \param n size of the input matrix
  *
  * For compilation efficiency reasons, this procedure does not use eigen expression
  * for its arguments.
  *
  * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
  * "implicit symmetric QR step with Wilkinson shift"
  */
// Declaration only here; the definition is expected further down in this file
// (outside the visible chunk). The StorageOrder template parameter selects how
// matrixQ is addressed when rotations are accumulated.
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}
415 
// Full eigendecomposition driver: validate input, scale, tridiagonalize, then
// run the implicit symmetric QR iteration. Numerical steps are order-dependent
// (scaling must precede tridiagonalization; eigenvalue rescaling must follow
// the iteration), so the statement sequence is left untouched.
template<typename MatrixType>
template<typename InputType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const EigenBase<InputType>& a_matrix, int options)
{
  check_template_parameters();

  const InputType &matrix(a_matrix.derived());

  EIGEN_USING_STD(abs);
  eigen_assert(matrix.cols() == matrix.rows());
  eigen_assert((options&~(EigVecMask|GenEigMask))==0
          && (options&EigVecMask)!=EigVecMask
          && "invalid option parameter");
  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
  Index n = matrix.cols();
  m_eivalues.resize(n,1);

  // Trivial 1x1 case: the eigenvalue is the (real part of the) single
  // coefficient, and the eigenvector basis is just [1].
  if(n==1)
  {
    m_eivec = matrix;
    m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));
    if(computeEigenvectors)
      m_eivec.setOnes(n,n);
    m_info = Success;
    m_isInitialized = true;
    m_eigenvectorsOk = computeEigenvectors;
    return *this;
  }

  // declare some aliases
  RealVectorType& diag = m_eivalues;
  EigenvectorsType& mat = m_eivec;

  // map the matrix coefficients to [-1:1] to avoid over- and underflow.
  // Only the lower triangle is referenced, per the class contract.
  mat = matrix.template triangularView<Lower>();
  RealScalar scale = mat.cwiseAbs().maxCoeff();
  if(scale==RealScalar(0)) scale = RealScalar(1); // all-zero matrix: avoid division by zero
  mat.template triangularView<Lower>() /= scale;
  m_subdiag.resize(n-1);
  m_hcoeffs.resize(n-1);
  // Householder reduction to tridiagonal form; diag/m_subdiag receive the
  // tridiagonal entries, and mat is overwritten with Q when eigenvectors are
  // requested.
  internal::tridiagonalization_inplace(mat, diag, m_subdiag, m_hcoeffs, computeEigenvectors);

  m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);

  // scale back the eigen values
  m_eivalues *= scale;

  m_isInitialized = true;
  m_eigenvectorsOk = computeEigenvectors;
  return *this;
}
469 
470 template<typename MatrixType>
471 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
472 ::computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options)
473 {
474   //TODO : Add an option to scale the values beforehand
475   bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
476 
477   m_eivalues = diag;
478   m_subdiag = subdiag;
479   if (computeEigenvectors)
480   {
481     m_eivec.setIdentity(diag.size(), diag.size());
482   }
483   m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
484 
485   m_isInitialized = true;
486   m_eigenvectorsOk = computeEigenvectors;
487   return *this;
488 }
489 
490 namespace internal {
491 /**
492   * \internal
493   * \brief Compute the eigendecomposition from a tridiagonal matrix
494   *
495   * \param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues
496   * \param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)
497   * \param[in] maxIterations : the maximum number of iterations
498   * \param[in] computeEigenvectors : whether the eigenvectors have to be computed or not
499   * \param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
500   * \returns \c Success or \c NoConvergence
501   */
// Core implicit symmetric QR iteration with deflation on a tridiagonal matrix.
// The deflation thresholds, block-boundary scans and the final sort are all
// order-dependent; comments only, code unchanged.
template<typename MatrixType, typename DiagType, typename SubDiagType>
EIGEN_DEVICE_FUNC
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
{
  ComputationInfo info;
  typedef typename MatrixType::Scalar Scalar;

  Index n = diag.size();
  Index end = n-1;        // last row/col of the active (unreduced) block
  Index start = 0;        // first row/col of the active block
  Index iter = 0; // total number of iterations

  typedef typename DiagType::RealScalar RealScalar;
  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
  const RealScalar precision_inv = RealScalar(1)/NumTraits<RealScalar>::epsilon();
  while (end>0)
  {
    // Deflation pass: zero out sub-diagonal entries that are negligible
    // relative to their neighboring diagonal entries.
    for (Index i = start; i<end; ++i) {
      if (numext::abs(subdiag[i]) < considerAsZero) {
        subdiag[i] = RealScalar(0);
      } else {
        // abs(subdiag[i]) <= epsilon * sqrt(abs(diag[i]) + abs(diag[i+1]))
        // Scaled to prevent underflows.
        const RealScalar scaled_subdiag = precision_inv * subdiag[i];
        if (scaled_subdiag * scaled_subdiag <= (numext::abs(diag[i])+numext::abs(diag[i+1]))) {
          subdiag[i] = RealScalar(0);
        }
      }
    }

    // find the largest unreduced block at the end of the matrix.
    while (end>0 && subdiag[end-1]==RealScalar(0))
    {
      end--;
    }
    if (end<=0)
      break;

    // if we spent too many iterations, we give up
    iter++;
    if(iter > maxIterations * n) break;

    // Extend the active block leftwards until a zero sub-diagonal entry
    // (block boundary) is hit.
    start = end - 1;
    while (start>0 && subdiag[start-1]!=0)
      start--;

    // One implicit QR step with Wilkinson shift on [start, end].
    internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
  }
  if (iter <= maxIterations * n)
    info = Success;
  else
    info = NoConvergence;

  // Sort eigenvalues and corresponding vectors.
  // TODO make the sort optional ?
  // TODO use a better sort algorithm !!
  // Selection sort: O(n^2) swaps but only performed on convergence; keeps
  // eigenvector columns paired with their eigenvalues.
  if (info == Success)
  {
    for (Index i = 0; i < n-1; ++i)
    {
      Index k;
      diag.segment(i,n-i).minCoeff(&k);
      if (k > 0)
      {
        numext::swap(diag[i], diag[k+i]);
        if(computeEigenvectors)
          eivec.col(i).swap(eivec.col(k+i));
      }
    }
  }
  return info;
}
574 
575 template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
576 {
577   EIGEN_DEVICE_FUNC
578   static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
579   { eig.compute(A,options); }
580 };
581 
582 template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
583 {
584   typedef typename SolverType::MatrixType MatrixType;
585   typedef typename SolverType::RealVectorType VectorType;
586   typedef typename SolverType::Scalar Scalar;
587   typedef typename SolverType::EigenvectorsType EigenvectorsType;
588 
589 
590   /** \internal
591    * Computes the roots of the characteristic polynomial of \a m.
592    * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
593    */
594   EIGEN_DEVICE_FUNC
595   static inline void computeRoots(const MatrixType& m, VectorType& roots)
596   {
597     EIGEN_USING_STD(sqrt)
598     EIGEN_USING_STD(atan2)
599     EIGEN_USING_STD(cos)
600     EIGEN_USING_STD(sin)
601     const Scalar s_inv3 = Scalar(1)/Scalar(3);
602     const Scalar s_sqrt3 = sqrt(Scalar(3));
603 
604     // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0.  The
605     // eigenvalues are the roots to this equation, all guaranteed to be
606     // real-valued, because the matrix is symmetric.
607     Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
608     Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
609     Scalar c2 = m(0,0) + m(1,1) + m(2,2);
610 
611     // Construct the parameters used in classifying the roots of the equation
612     // and in solving the equation for the roots in closed form.
613     Scalar c2_over_3 = c2*s_inv3;
614     Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
615     a_over_3 = numext::maxi(a_over_3, Scalar(0));
616 
617     Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
618 
619     Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
620     q = numext::maxi(q, Scalar(0));
621 
622     // Compute the eigenvalues by solving for the roots of the polynomial.
623     Scalar rho = sqrt(a_over_3);
624     Scalar theta = atan2(sqrt(q),half_b)*s_inv3;  // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
625     Scalar cos_theta = cos(theta);
626     Scalar sin_theta = sin(theta);
627     // roots are already sorted, since cos is monotonically decreasing on [0, pi]
628     roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
629     roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
630     roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
631   }
632 
633   EIGEN_DEVICE_FUNC
634   static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
635   {
636     EIGEN_USING_STD(abs);
637     EIGEN_USING_STD(sqrt);
638     Index i0;
639     // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
640     mat.diagonal().cwiseAbs().maxCoeff(&i0);
641     // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
642     // so let's save it:
643     representative = mat.col(i0);
644     Scalar n0, n1;
645     VectorType c0, c1;
646     n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
647     n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
648     if(n0>n1) res = c0/sqrt(n0);
649     else      res = c1/sqrt(n1);
650 
651     return true;
652   }
653 
654   EIGEN_DEVICE_FUNC
655   static inline void run(SolverType& solver, const MatrixType& mat, int options)
656   {
657     eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
658     eigen_assert((options&~(EigVecMask|GenEigMask))==0
659             && (options&EigVecMask)!=EigVecMask
660             && "invalid option parameter");
661     bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
662 
663     EigenvectorsType& eivecs = solver.m_eivec;
664     VectorType& eivals = solver.m_eivalues;
665 
666     // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
667     Scalar shift = mat.trace() / Scalar(3);
668     // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
669     MatrixType scaledMat = mat.template selfadjointView<Lower>();
670     scaledMat.diagonal().array() -= shift;
671     Scalar scale = scaledMat.cwiseAbs().maxCoeff();
672     if(scale > 0) scaledMat /= scale;   // TODO for scale==0 we could save the remaining operations
673 
674     // compute the eigenvalues
675     computeRoots(scaledMat,eivals);
676 
677     // compute the eigenvectors
678     if(computeEigenvectors)
679     {
680       if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
681       {
682         // All three eigenvalues are numerically the same
683         eivecs.setIdentity();
684       }
685       else
686       {
687         MatrixType tmp;
688         tmp = scaledMat;
689 
690         // Compute the eigenvector of the most distinct eigenvalue
691         Scalar d0 = eivals(2) - eivals(1);
692         Scalar d1 = eivals(1) - eivals(0);
693         Index k(0), l(2);
694         if(d0 > d1)
695         {
696           numext::swap(k,l);
697           d0 = d1;
698         }
699 
700         // Compute the eigenvector of index k
701         {
702           tmp.diagonal().array () -= eivals(k);
703           // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
704           extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
705         }
706 
707         // Compute eigenvector of index l
708         if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
709         {
710           // If d0 is too small, then the two other eigenvalues are numerically the same,
711           // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
712           eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
713           eivecs.col(l).normalize();
714         }
715         else
716         {
717           tmp = scaledMat;
718           tmp.diagonal().array () -= eivals(l);
719 
720           VectorType dummy;
721           extract_kernel(tmp, eivecs.col(l), dummy);
722         }
723 
724         // Compute last eigenvector from the other two
725         eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
726       }
727     }
728 
729     // Rescale back to the original size.
730     eivals *= scale;
731     eivals.array() += shift;
732 
733     solver.m_info = Success;
734     solver.m_isInitialized = true;
735     solver.m_eigenvectorsOk = computeEigenvectors;
736   }
737 };
738 
739 // 2x2 direct eigenvalues decomposition, code from Hauke Heibel
740 template<typename SolverType>
741 struct direct_selfadjoint_eigenvalues<SolverType,2,false>
742 {
743   typedef typename SolverType::MatrixType MatrixType;
744   typedef typename SolverType::RealVectorType VectorType;
745   typedef typename SolverType::Scalar Scalar;
746   typedef typename SolverType::EigenvectorsType EigenvectorsType;
747 
748   EIGEN_DEVICE_FUNC
749   static inline void computeRoots(const MatrixType& m, VectorType& roots)
750   {
751     EIGEN_USING_STD(sqrt);
752     const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
753     const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
754     roots(0) = t1 - t0;
755     roots(1) = t1 + t0;
756   }
757 
758   EIGEN_DEVICE_FUNC
759   static inline void run(SolverType& solver, const MatrixType& mat, int options)
760   {
761     EIGEN_USING_STD(sqrt);
762     EIGEN_USING_STD(abs);
763 
764     eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
765     eigen_assert((options&~(EigVecMask|GenEigMask))==0
766             && (options&EigVecMask)!=EigVecMask
767             && "invalid option parameter");
768     bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
769 
770     EigenvectorsType& eivecs = solver.m_eivec;
771     VectorType& eivals = solver.m_eivalues;
772 
773     // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
774     Scalar shift = mat.trace() / Scalar(2);
775     MatrixType scaledMat = mat;
776     scaledMat.coeffRef(0,1) = mat.coeff(1,0);
777     scaledMat.diagonal().array() -= shift;
778     Scalar scale = scaledMat.cwiseAbs().maxCoeff();
779     if(scale > Scalar(0))
780       scaledMat /= scale;
781 
782     // Compute the eigenvalues
783     computeRoots(scaledMat,eivals);
784 
785     // compute the eigen vectors
786     if(computeEigenvectors)
787     {
788       if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
789       {
790         eivecs.setIdentity();
791       }
792       else
793       {
794         scaledMat.diagonal().array () -= eivals(1);
795         Scalar a2 = numext::abs2(scaledMat(0,0));
796         Scalar c2 = numext::abs2(scaledMat(1,1));
797         Scalar b2 = numext::abs2(scaledMat(1,0));
798         if(a2>c2)
799         {
800           eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
801           eivecs.col(1) /= sqrt(a2+b2);
802         }
803         else
804         {
805           eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
806           eivecs.col(1) /= sqrt(c2+b2);
807         }
808 
809         eivecs.col(0) << eivecs.col(1).unitOrthogonal();
810       }
811     }
812 
813     // Rescale back to the original size.
814     eivals *= scale;
815     eivals.array() += shift;
816 
817     solver.m_info = Success;
818     solver.m_isInitialized = true;
819     solver.m_eigenvectorsOk = computeEigenvectors;
820   }
821 };
822 
823 }
824 
825 template<typename MatrixType>
826 EIGEN_DEVICE_FUNC
827 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
828 ::computeDirect(const MatrixType& matrix, int options)
829 {
830   internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
831   return *this;
832 }
833 
834 namespace internal {
835 
// Francis implicit symmetric QR step with Wilkinson shift.
//
// Performs one implicit QR iteration on the active block [start..end] of the
// symmetric tridiagonal matrix described by \a diag and \a subdiag, updating
// both in place.
//
// \param diag     diagonal entries of the tridiagonal matrix (updated in place)
// \param subdiag  sub-diagonal entries (updated in place)
// \param start    index of the first row/column of the active block
// \param end      index of the last row/column of the active block
// \param matrixQ  if non-null, pointer to an n x n matrix which accumulates the
//                 applied Givens rotations on the right (used to build the
//                 eigenvectors); may be null when only eigenvalues are needed
// \param n        dimension of the matrix pointed to by matrixQ
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
  // Wilkinson Shift.
  RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
  RealScalar e = subdiag[end-1];
  // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
  // underflow thus leading to inf/NaN values when using the following commented code:
  //   RealScalar e2 = numext::abs2(subdiag[end-1]);
  //   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
  // This explain the following, somewhat more complicated, version:
  RealScalar mu = diag[end];
  if(td==RealScalar(0)) {
    // Degenerate shift: the naive formula would divide by sign(0)*|e|.
    mu -= numext::abs(e);
  } else if (e != RealScalar(0)) {
    const RealScalar e2 = numext::abs2(e);
    const RealScalar h = numext::hypot(td,e);
    if(e2 == RealScalar(0)) {
      // e is non-zero but e^2 underflowed to 0: rewrite e2/x as e/(x/e)
      // to keep the correction finite and meaningful.
      mu -= e / ((td + (td>RealScalar(0) ? h : -h)) / e);
    } else {
      mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
    }
  }

  // First Givens rotation annihilates the shifted sub-diagonal entry;
  // subsequent rotations chase the resulting bulge down the matrix.
  RealScalar x = diag[start] - mu;
  RealScalar z = subdiag[start];
  // If z ever becomes zero, the Givens rotation will be the identity and
  // z will stay zero for all future iterations.
  for (Index k = start; k < end && z != RealScalar(0); ++k)
  {
    JacobiRotation<RealScalar> rot;
    rot.makeGivens(x, z);

    // do T = G' T G
    RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
    RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];

    diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
    diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
    subdiag[k] = rot.c() * sdk - rot.s() * dkp1;

    if (k > start)
      subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;

    // "Chasing the bulge" to return to triangular form.
    x = subdiag[k];
    if (k < end - 1)
    {
      z = -rot.s() * subdiag[k+1];
      subdiag[k + 1] = rot.c() * subdiag[k+1];
    }

    // apply the givens rotation to the unit matrix Q = Q * G
    if (matrixQ)
    {
      // FIXME if StorageOrder == RowMajor this operation is not very efficient
      Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
      q.applyOnTheRight(k,k+1,rot);
    }
  }
}
899 
900 } // end namespace internal
901 
902 } // end namespace Eigen
903 
904 #endif // EIGEN_SELFADJOINTEIGENSOLVER_H
905