Squashed 'third_party/eigen/' content from commit 61d72f6

Change-Id: Iccc90fa0b55ab44037f018046d2fcffd90d9d025
git-subtree-dir: third_party/eigen
git-subtree-split: 61d72f6383cfa842868c53e30e087b0258177257
diff --git a/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt b/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
new file mode 100644
index 0000000..7986afc
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_IterativeSolvers_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_IterativeSolvers_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/IterativeSolvers COMPONENT Devel
+  )
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
new file mode 100644
index 0000000..dc0093e
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -0,0 +1,189 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/* NOTE The functions of this file have been adapted from the GMM++ library */
+
+//========================================================================
+//
+// Copyright (C) 2002-2007 Yves Renard
+//
+// This file is a part of GETFEM++
+//
+// Getfem++ is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation; version 2.1 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301,
+// USA.
+//
+//========================================================================
+
+#include "../../../../Eigen/src/Core/util/NonMPL2.h"
+
+#ifndef EIGEN_CONSTRAINEDCG_H
+#define EIGEN_CONSTRAINEDCG_H
+
+#include <Eigen/Core>
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \ingroup IterativeSolvers_Module
+  * Compute the pseudo inverse of the non-square matrix C such that
+  * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method.
+  *
+  * This function is internally used by constrained_cg.
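+  *
+  * A minimal usage sketch (illustrative only; the concrete sparse types are
+  * assumptions, not requirements of this function):
+  * \code
+  * int m = 5, n = 20;                        // m constraints on n unknowns
+  * SparseMatrix<double,RowMajor> C(m,n), CINV(m,n);
+  * // fill C ...
+  * internal::pseudo_inverse(C, CINV);        // CINV approximates (C*C^T)^{-1} * C
+  * \endcode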
+  */
+template <typename CMatrix, typename CINVMatrix>
+void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
+{
+  // Possible optimization: copy of the row, precompute C * trans(C).
+  typedef typename CMatrix::Scalar Scalar;
+  typedef typename CMatrix::Index Index;
+  // FIXME use sparse vectors ?
+  typedef Matrix<Scalar,Dynamic,1> TmpVec;
+
+  Index rows = C.rows(), cols = C.cols();
+
+  TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows);
+  Scalar rho, rho_1, alpha;
+  d.setZero();
+
+  typedef Triplet<double> T;
+  std::vector<T> tripletList;
+    
+  for (Index i = 0; i < rows; ++i)
+  {
+    d[i] = 1.0;
+    rho = 1.0;
+    e.setZero();
+    r = d;
+    p = d;
+
+    while (rho >= 1e-38)
+    { /* conjugate gradient to compute e             */
+      /* which is the i-th row of inv(C * trans(C))  */
+      l = C.transpose() * p;
+      q = C * l;
+      alpha = rho / p.dot(q);
+      e +=  alpha * p;
+      r += -alpha * q;
+      rho_1 = rho;
+      rho = r.dot(r);
+      p = (rho/rho_1) * p + r;
+    }
+
+    l = C.transpose() * e; // l is the i-th row of CINV
+    // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
+    for (Index j=0; j<l.size(); ++j)
+      if (l[j]<1e-15)
+        tripletList.push_back(T(i,j,l(j)));
+
+    d[i] = 0.0;
+  }
+  CINV.setFromTriplets(tripletList.begin(), tripletList.end());
+}
+
+
+
+/** \ingroup IterativeSolvers_Module
+  * Constrained conjugate gradient
+  *
+  * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the constraint \f$ Cx \le f \f$
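+  *
+  * A minimal usage sketch (illustrative; the concrete matrix and vector types
+  * are assumptions, not requirements):
+  * \code
+  * int n = 100, m = 10;
+  * SparseMatrix<double> A(n,n);              // symmetric positive definite
+  * SparseMatrix<double,RowMajor> C(m,n);     // constraint matrix
+  * VectorXd b(n), f(m), x(n);
+  * // fill A, C, b, f and set an initial x (e.g. x.setZero()) ...
+  * IterationController iter(1e-8);           // residual tolerance
+  * internal::constrained_cg(A, C, x, b, f, iter);
+  * \endcode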
+  */
+template<typename TMatrix, typename CMatrix,
+         typename VectorX, typename VectorB, typename VectorF>
+void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
+                       const VectorB& b, const VectorF& f, IterationController &iter)
+{
+  using std::sqrt;
+  typedef typename TMatrix::Scalar Scalar;
+  typedef typename TMatrix::Index Index;
+  typedef Matrix<Scalar,Dynamic,1>  TmpVec;
+
+  Scalar rho = 1.0, rho_1, lambda, gamma;
+  Index xSize = x.size();
+  TmpVec  p(xSize), q(xSize), q2(xSize),
+          r(xSize), old_z(xSize), z(xSize),
+          memox(xSize);
+  std::vector<bool> satured(C.rows());
+  p.setZero();
+  iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b)
+  if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0);
+
+  SparseMatrix<Scalar,RowMajor> CINV(C.rows(), C.cols());
+  pseudo_inverse(C, CINV);
+
+  while(true)
+  {
+    // computation of residual
+    old_z = z;
+    memox = x;
+    r = b;
+    r += A * -x;
+    z = r;
+    bool transition = false;
+    for (Index i = 0; i < C.rows(); ++i)
+    {
+      Scalar al = C.row(i).dot(x) - f.coeff(i);
+      if (al >= -1.0E-15)
+      {
+        if (!satured[i])
+        {
+          satured[i] = true;
+          transition = true;
+        }
+        Scalar bb = CINV.row(i).dot(z);
+        if (bb > 0.0)
+          // FIXME: we should allow that: z += -bb * C.row(i);
+          for (typename CMatrix::InnerIterator it(C,i); it; ++it)
+            z.coeffRef(it.index()) -= bb*it.value();
+      }
+      else
+        satured[i] = false;
+    }
+
+    // descent direction
+    rho_1 = rho;
+    rho = r.dot(z);
+
+    if (iter.finished(rho)) break;
+
+    if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
+    if (transition || iter.first()) gamma = 0.0;
+    else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
+    p = z + gamma*p;
+
+    ++iter;
+    // one-dimensional optimization
+    q = A * p;
+    lambda = rho / q.dot(p);
+    for (Index i = 0; i < C.rows(); ++i)
+    {
+      if (!satured[i])
+      {
+        Scalar bb = C.row(i).dot(p) - f[i];
+        if (bb > 0.0)
+          lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
+      }
+    }
+    x += lambda * p;
+    memox -= x;
+  }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSTRAINEDCG_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
new file mode 100644
index 0000000..9fcc8a8
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -0,0 +1,542 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DGMRES_H
+#define EIGEN_DGMRES_H
+
+#include <Eigen/Eigenvalues>
+
+namespace Eigen { 
+  
+template< typename _MatrixType,
+          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class DGMRES;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<DGMRES<_MatrixType,_Preconditioner> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Preconditioner Preconditioner;
+};
+
+/** \brief Computes a permutation vector to have a sorted sequence
+  * \param vec The vector to reorder.
+  * \param perm gives the sorted sequence on output. Must be initialized with 0..n-1
+  * \param ncut Put the ncut smallest elements at the end of the vector
+  * WARNING This is an expensive sort, so it should be used only
+  * for small vectors
+  * TODO Use modified QuickSplit or std::nth_element to get the smallest values 
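+  *
+  * Illustrative example (the concrete vector types are assumptions; \a perm must
+  * be pre-filled with 0..n-1):
+  * \code
+  * VectorXd vals(4);  vals << 3.0, 1.0, 4.0, 0.5;
+  * VectorXi perm(4);  perm.setLinSpaced(4,0,3);
+  * int ncut = 2;      // push the 2 smallest values to the end of the permutation
+  * internal::sortWithPermutation(vals, perm, ncut);
+  * \endcode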
+  */
+template <typename VectorType, typename IndexType>
+void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
+{
+  eigen_assert(vec.size() == perm.size());
+  typedef typename IndexType::Scalar Index; 
+  typedef typename VectorType::Scalar Scalar; 
+  bool flag; 
+  for (Index k  = 0; k < ncut; k++)
+  {
+    flag = false;
+    for (Index j = 0; j < vec.size()-1; j++)
+    {
+      if ( vec(perm(j)) < vec(perm(j+1)) )
+      {
+        std::swap(perm(j),perm(j+1)); 
+        flag = true;
+      }
+      if (!flag) break; // The vector is in sorted order
+    }
+  }
+}
+
+}
+/**
+ * \ingroup IterativeLinearSolvers_Module
+ * \brief A Restarted GMRES with deflation.
+ * This class implements a modification of the GMRES solver for
+ * sparse linear systems. The basis is built with modified 
+ * Gram-Schmidt. At each restart, a few approximated eigenvectors
+ * corresponding to the smallest eigenvalues are used to build a
+ * preconditioner for the next cycle. This preconditioner 
+ * for deflation can be combined with any other preconditioner, 
+ * the IncompleteLUT for instance. The preconditioner is applied 
+ * at right of the matrix and the combination is multiplicative.
+ * 
+ * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ * Typical usage:
+ * \code
+ * SparseMatrix<double> A;
+ * VectorXd x, b; 
+ * //Fill A and b ...
+ * DGMRES<SparseMatrix<double> > solver;
+ * solver.set_restart(30); // Set restarting value
+ * solver.setEigenv(1); // Set the number of eigenvalues to deflate
+ * solver.compute(A);
+ * x = solver.solve(b);
+ * \endcode
+ * 
+ * References :
+ * [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid
+ *  Algebraic Solvers for Linear Systems Arising from Compressible
+ *  Flows, Computers and Fluids, In Press,
+ *  http://dx.doi.org/10.1016/j.compfluid.2012.03.023   
+ * [2] K. Burrage and J. Erhel, On the performance of various
+ *  adaptive preconditioned GMRES strategies, Numer. Linear Algebra
+ *  Appl., 5(1998), 101-121.
+ * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES
+ *  preconditioned by deflation, J. Computational and Applied
+ *  Mathematics, 69(1996), 303-318.
+
+ * 
+ */
+template< typename _MatrixType, typename _Preconditioner>
+class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
+{
+    typedef IterativeSolverBase<DGMRES> Base;
+    using Base::mp_matrix;
+    using Base::m_error;
+    using Base::m_iterations;
+    using Base::m_info;
+    using Base::m_isInitialized;
+    using Base::m_tolerance; 
+  public:
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef _Preconditioner Preconditioner;
+    typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix; 
+    typedef Matrix<RealScalar,Dynamic,Dynamic> DenseRealMatrix; 
+    typedef Matrix<Scalar,Dynamic,1> DenseVector;
+    typedef Matrix<RealScalar,Dynamic,1> DenseRealVector; 
+    typedef Matrix<std::complex<RealScalar>, Dynamic, 1> ComplexVector;
+ 
+    
+  /** Default constructor. */
+  DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {}
+
+  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+    * 
+    * This constructor is a shortcut for the default constructor followed
+    * by a call to compute().
+    * 
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  DGMRES(const MatrixType& A) : Base(A),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false)
+  {}
+
+  ~DGMRES() {}
+  
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * \a x0 as an initial solution.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs,typename Guess>
+  inline const internal::solve_retval_with_guess<DGMRES, Rhs, Guess>
+  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+  {
+    eigen_assert(m_isInitialized && "DGMRES is not initialized.");
+    eigen_assert(Base::rows()==b.rows()
+              && "DGMRES::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::solve_retval_with_guess
+            <DGMRES, Rhs, Guess>(*this, b.derived(), x0);
+  }
+  
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solveWithGuess(const Rhs& b, Dest& x) const
+  {    
+    bool failed = false;
+    for(int j=0; j<b.cols(); ++j)
+    {
+      m_iterations = Base::maxIterations();
+      m_error = Base::m_tolerance;
+      
+      typename Dest::ColXpr xj(x,j);
+      dgmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner);
+    }
+    m_info = failed ? NumericalIssue
+           : m_error <= Base::m_tolerance ? Success
+           : NoConvergence;
+    m_isInitialized = true;
+  }
+
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solve(const Rhs& b, Dest& x) const
+  {
+    x = b;
+    _solveWithGuess(b,x);
+  }
+  /** 
+   * Get the restart value
+    */
+  int restart() { return m_restart; }
+  
+  /** 
+   * Set the restart value (default is 30)  
+   */
+  void set_restart(const int restart) { m_restart=restart; }
+  
+  /** 
+   * Set the number of eigenvalues to deflate at each restart 
+   */
+  void setEigenv(const int neig) 
+  {
+    m_neig = neig;
+    if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates
+  }
+  
+  /** 
+   * Get the size of the deflation subspace
+   */ 
+  int deflSize() {return m_r; }
+  
+  /**
+   * Set the maximum size of the deflation subspace
+   */
+  void setMaxEigenv(const int maxNeig) { m_maxNeig = maxNeig; }
+  
+  protected:
+    // DGMRES algorithm 
+    template<typename Rhs, typename Dest>
+    void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const;
+    // Perform one cycle of GMRES
+    template<typename Dest>
+    int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const; 
+    // Compute data to use for deflation 
+    int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const;
+    // Apply deflation to a vector
+    template<typename RhsType, typename DestType>
+    int dgmresApplyDeflation(const RhsType& In, DestType& Out) const; 
+    ComplexVector schurValues(const ComplexSchur<DenseMatrix>& schurofH) const;
+    ComplexVector schurValues(const RealSchur<DenseMatrix>& schurofH) const;
+    // Init data for deflation
+    void dgmresInitDeflation(Index& rows) const; 
+    mutable DenseMatrix m_V; // Krylov basis vectors
+    mutable DenseMatrix m_H; // Hessenberg matrix 
+    mutable DenseMatrix m_Hes; // Initial Hessenberg matrix without Givens rotations applied
+    mutable Index m_restart; // Maximum size of the Krylov subspace
+    mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace 
+    mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
+    mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */
+    mutable PartialPivLU<DenseMatrix> m_luT; // LU factorization of m_T
+    mutable int m_neig; //Number of eigenvalues to extract at each restart
+    mutable int m_r; // Current number of deflated eigenvalues, size of m_U
+    mutable int m_maxNeig; // Maximum number of eigenvalues to deflate
+    mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A
+    mutable bool m_isDeflAllocated;
+    mutable bool m_isDeflInitialized;
+    
+    //Adaptive strategy 
+    mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed
+    mutable bool m_force; // Force the use of deflation at each restart
+    
+}; 
+/** 
+ * \brief Perform several cycles of restarted GMRES with modified Gram-Schmidt.
+ * 
+ * A right preconditioner is used, combined with deflation.
+ * 
+ */
+template< typename _MatrixType, typename _Preconditioner>
+template<typename Rhs, typename Dest>
+void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x,
+              const Preconditioner& precond) const
+{
+  //Initialization
+  int n = mat.rows(); 
+  DenseVector r0(n); 
+  int nbIts = 0; 
+  m_H.resize(m_restart+1, m_restart);
+  m_Hes.resize(m_restart, m_restart);
+  m_V.resize(n,m_restart+1);
+  // Initial residual vector and initial norm
+  x = precond.solve(x);
+  r0 = rhs - mat * x; 
+  RealScalar beta = r0.norm(); 
+  RealScalar normRhs = rhs.norm();
+  m_error = beta/normRhs; 
+  if(m_error < m_tolerance)
+    m_info = Success; 
+  else
+    m_info = NoConvergence;
+  
+  // Iterative process
+  while (nbIts < m_iterations && m_info == NoConvergence)
+  {
+    dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts); 
+    
+    // Compute the new residual vector for the restart 
+    if (nbIts < m_iterations && m_info == NoConvergence)
+      r0 = rhs - mat * x; 
+  }
+} 
+
+/**
+ * \brief Perform one restart cycle of DGMRES
+ * \param mat The coefficient matrix
+ * \param precond The preconditioner
+ * \param x the new approximated solution
+ * \param r0 The initial residual vector
+ * \param beta The norm of the residual computed so far
+ * \param normRhs The norm of the right hand side vector
+ * \param nbIts The number of iterations
+ */
+template< typename _MatrixType, typename _Preconditioner>
+template<typename Dest>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const
+{
+  //Initialization 
+  DenseVector g(m_restart+1); // Right hand side of the least squares problem
+  g.setZero();  
+  g(0) = Scalar(beta); 
+  m_V.col(0) = r0/beta; 
+  m_info = NoConvergence; 
+  std::vector<JacobiRotation<Scalar> >gr(m_restart); // Givens rotations
+  int it = 0; // Number of inner iterations 
+  int n = mat.rows();
+  DenseVector tv1(n), tv2(n);  //Temporary vectors
+  while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations)
+  {    
+    // Apply preconditioner(s) at right
+    if (m_isDeflInitialized )
+    {
+      dgmresApplyDeflation(m_V.col(it), tv1); // Deflation
+      tv2 = precond.solve(tv1); 
+    }
+    else
+    {
+      tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner
+    }
+    tv1 = mat * tv2; 
+   
+    // Orthogonalize it against the previous basis vectors using modified Gram-Schmidt
+    Scalar coef; 
+    for (int i = 0; i <= it; ++i)
+    { 
+      coef = tv1.dot(m_V.col(i));
+      tv1 = tv1 - coef * m_V.col(i); 
+      m_H(i,it) = coef; 
+      m_Hes(i,it) = coef; 
+    }
+    // Normalize the vector 
+    coef = tv1.norm(); 
+    m_V.col(it+1) = tv1/coef;
+    m_H(it+1, it) = coef;
+//     m_Hes(it+1,it) = coef; 
+    
+    // FIXME Check for happy breakdown 
+    
+    // Update Hessenberg matrix with Givens rotations
+    for (int i = 1; i <= it; ++i) 
+    {
+      m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint());
+    }
+    // Compute the new plane rotation 
+    gr[it].makeGivens(m_H(it, it), m_H(it+1,it)); 
+    // Apply the new rotation
+    m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint());
+    g.applyOnTheLeft(it,it+1, gr[it].adjoint()); 
+    
+    beta = std::abs(g(it+1));
+    m_error = beta/normRhs; 
+    std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl;
+    it++; nbIts++; 
+    
+    if (m_error < m_tolerance)
+    {
+      // The method has converged
+      m_info = Success;
+      break;
+    }
+  }
+  
+  // Compute the new coefficients by solving the least squares problem
+//   it++;
+  //FIXME  Check first if the matrix is singular ... zero diagonal
+  DenseVector nrs(m_restart); 
+  nrs = m_H.topLeftCorner(it,it).template triangularView<Upper>().solve(g.head(it)); 
+  
+  // Form the new solution
+  if (m_isDeflInitialized)
+  {
+    tv1 = m_V.leftCols(it) * nrs; 
+    dgmresApplyDeflation(tv1, tv2); 
+    x = x + precond.solve(tv2);
+  }
+  else
+    x = x + precond.solve(m_V.leftCols(it) * nrs); 
+  
+  // Go for a new cycle and compute data for deflation
+  if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig)
+    dgmresComputeDeflationData(mat, precond, it, m_neig); 
+  return 0; 
+  
+}
+
+
+template< typename _MatrixType, typename _Preconditioner>
+void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const
+{
+  m_U.resize(rows, m_maxNeig);
+  m_MU.resize(rows, m_maxNeig); 
+  m_T.resize(m_maxNeig, m_maxNeig);
+  m_lambdaN = 0.0; 
+  m_isDeflAllocated = true; 
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur<DenseMatrix>& schurofH) const
+{
+  return schurofH.matrixT().diagonal();
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
+{
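+  // The real Schur form is only quasi upper triangular: 1x1 diagonal blocks hold real
+  // eigenvalues while 2x2 diagonal blocks encode complex conjugate pairs, hence the
+  // block-wise extraction below.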
+  typedef typename MatrixType::Index Index;
+  const DenseMatrix& T = schurofH.matrixT();
+  Index it = T.rows();
+  ComplexVector eig(it);
+  Index j = 0;
+  while (j < it-1)
+  {
+    if (T(j+1,j) ==Scalar(0))
+    {
+      eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0)); 
+      j++; 
+    }
+    else
+    {
+      eig(j) = std::complex<RealScalar>(T(j,j),T(j+1,j)); 
+      eig(j+1) = std::complex<RealScalar>(T(j,j+1),T(j+1,j+1));
+      j++;
+    }
+  }
+  if (j < it-1) eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
+  return eig;
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const
+{
+  // First, find the Schur form of the Hessenberg matrix H
+  typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH; 
+  bool computeU = true;
+  DenseMatrix matrixQ(it,it); 
+  matrixQ.setIdentity();
+  schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU); 
+  
+  ComplexVector eig(it);
+  Matrix<Index,Dynamic,1>perm(it); 
+  eig = this->schurValues(schurofH);
+  
+  // Reorder the absolute values of Schur values
+  DenseRealVector modulEig(it); 
+  for (int j=0; j<it; ++j) modulEig(j) = std::abs(eig(j)); 
+  perm.setLinSpaced(it,0,it-1);
+  internal::sortWithPermutation(modulEig, perm, neig);
+  
+  if (!m_lambdaN)
+  {
+    m_lambdaN = (std::max)(modulEig.maxCoeff(), m_lambdaN);
+  }
+  // Count the actual number of extracted eigenvalues (complex conjugate pairs count as two)
+  int nbrEig = 0; 
+  while (nbrEig < neig)
+  {
+    if(eig(perm(it-nbrEig-1)).imag() == RealScalar(0)) nbrEig++; 
+    else nbrEig += 2; 
+  }
+  // Extract the Schur vectors corresponding to the smallest Ritz values
+  DenseMatrix Sr(it, nbrEig); 
+  Sr.setZero();
+  for (int j = 0; j < nbrEig; j++)
+  {
+    Sr.col(j) = schurofH.matrixU().col(perm(it-j-1));
+  }
+  
+  // Form the Schur vectors of the initial matrix using the Krylov basis
+  DenseMatrix X; 
+  X = m_V.leftCols(it) * Sr;
+  if (m_r)
+  {
+   // Orthogonalize X against m_U using modified Gram-Schmidt
+   for (int j = 0; j < nbrEig; j++)
+     for (int k =0; k < m_r; k++)
+      X.col(j) = X.col(j) - (m_U.col(k).dot(X.col(j)))*m_U.col(k); 
+  }
+  
+  // Compute m_MX = A * M^-1 * X
+  Index m = m_V.rows();
+  if (!m_isDeflAllocated) 
+    dgmresInitDeflation(m); 
+  DenseMatrix MX(m, nbrEig);
+  DenseVector tv1(m);
+  for (int j = 0; j < nbrEig; j++)
+  {
+    tv1 = mat * X.col(j);
+    MX.col(j) = precond.solve(tv1);
+  }
+  
+  //Update m_T = [U'MU U'MX; X'MU X'MX]
+  m_T.block(m_r, m_r, nbrEig, nbrEig) = X.transpose() * MX; 
+  if(m_r)
+  {
+    m_T.block(0, m_r, m_r, nbrEig) = m_U.leftCols(m_r).transpose() * MX; 
+    m_T.block(m_r, 0, nbrEig, m_r) = X.transpose() * m_MU.leftCols(m_r);
+  }
+  
+  // Save X into m_U and MX into m_MU
+  for (int j = 0; j < nbrEig; j++) m_U.col(m_r+j) = X.col(j);
+  for (int j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j);
+  // Increase the size of the invariant subspace
+  m_r += nbrEig; 
+  
+  // Factorize m_T into m_luT
+  m_luT.compute(m_T.topLeftCorner(m_r, m_r));
+  
+  // FIXME Check if the factorization was correctly done (nonsingular matrix)
+  m_isDeflInitialized = true;
+  return 0; 
+}
+template<typename _MatrixType, typename _Preconditioner>
+template<typename RhsType, typename DestType>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
+{
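+  // Applies the deflation operator M_d^{-1} = I + U * (lambda_N * T^{-1} - I) * U^T to x,
+  // i.e. the right preconditioner built from the approximate invariant subspace stored in m_U.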
+  DenseVector x1 = m_U.leftCols(m_r).transpose() * x; 
+  y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1);
+  return 0; 
+}
+
+namespace internal {
+
+  template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
+  : solve_retval_base<DGMRES<_MatrixType, _Preconditioner>, Rhs>
+{
+  typedef DGMRES<_MatrixType, _Preconditioner> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+} // end namespace internal
+
+} // end namespace Eigen
+#endif 
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
new file mode 100644
index 0000000..7ba13af
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -0,0 +1,370 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GMRES_H
+#define EIGEN_GMRES_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/**
+ * Generalized Minimal Residual Algorithm based on the
+ * Arnoldi algorithm implemented with Householder reflections.
+ *
+ * Parameters:
+ *  \param mat       matrix of linear system of equations
+ *  \param Rhs       right hand side vector of linear system of equations
+ *  \param x         on input: initial guess, on output: solution
+ *  \param precond   preconditioner used
+ *  \param iters     on input: maximum number of iterations to perform
+ *                   on output: number of iterations performed
+ *  \param restart   number of iterations for a restart
+ *  \param tol_error on input: residual tolerance
+ *                   on output: residual achieved
+ *
+ * \sa IterativeMethods::bicgstab() 
+ *  
+ *
+ * For references, please see:
+ *
+ * Saad, Y. and Schultz, M. H.
+ * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
+ * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
+ *
+ * Saad, Y.
+ * Iterative Methods for Sparse Linear Systems.
+ * Society for Industrial and Applied Mathematics, Philadelphia, 2003.
+ *
+ * Walker, H. F.
+ * Implementations of the GMRES method.
+ * Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
+ *
+ * Walker, H. F.
+ * Implementation of the GMRES Method using Householder Transformations.
+ * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
+ *
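+ * A minimal usage sketch of this low-level routine (illustrative; most users
+ * should go through the GMRES solver class defined below instead):
+ * \code
+ * int n = 100;
+ * SparseMatrix<double> A(n,n);
+ * VectorXd b(n), x(n);
+ * // fill A and b, set x to an initial guess (e.g. x.setZero()) ...
+ * DiagonalPreconditioner<double> precond(A);
+ * int iters = 1000;             // in: max iterations, out: iterations performed
+ * double tol = 1e-10;           // in: tolerance, out: achieved relative residual
+ * bool converged = internal::gmres(A, b, x, precond, iters, 30, tol);
+ * \endcode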
+ */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
+		int &iters, const int &restart, typename Dest::RealScalar & tol_error) {
+
+	using std::sqrt;
+	using std::abs;
+
+	typedef typename Dest::RealScalar RealScalar;
+	typedef typename Dest::Scalar Scalar;
+	typedef Matrix < Scalar, Dynamic, 1 > VectorType;
+	typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType;
+
+	RealScalar tol = tol_error;
+	const int maxIters = iters;
+	iters = 0;
+
+	const int m = mat.rows();
+
+	VectorType p0 = rhs - mat*x;
+	VectorType r0 = precond.solve(p0);
+ 
+	// is initial guess already good enough?
+	if(abs(r0.norm()) < tol) {
+		return true; 
+	}
+
+	VectorType w = VectorType::Zero(restart + 1);
+
+	FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
+	VectorType tau = VectorType::Zero(restart + 1);
+	std::vector < JacobiRotation < Scalar > > G(restart);
+
+	// generate first Householder vector
+	VectorType e(m-1);
+	RealScalar beta;
+	r0.makeHouseholder(e, tau.coeffRef(0), beta);
+	w(0)=(Scalar) beta;
+	H.bottomLeftCorner(m - 1, 1) = e;
+
+	for (int k = 1; k <= restart; ++k) {
+
+		++iters;
+
+		VectorType v = VectorType::Unit(m, k - 1), workspace(m);
+
+		// apply Householder reflections H_{1} ... H_{k-1} to v
+		for (int i = k - 1; i >= 0; --i) {
+			v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+		}
+
+		// apply matrix M to v:  v = mat * v;
+		VectorType t=mat*v;
+		v=precond.solve(t);
+
+		// apply Householder reflections H_{k-1} ... H_{1} to v
+		for (int i = 0; i < k; ++i) {
+			v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+		}
+
+		if (v.tail(m - k).norm() != 0.0) {
+
+			if (k <= restart) {
+
+				// generate new Householder vector
+                                  VectorType e(m - k - 1);
+				RealScalar beta;
+				v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta);
+				H.col(k).tail(m - k - 1) = e;
+
+				// apply Householder reflection H_{k} to v
+				v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data());
+
+			}
+                }
+
+                if (k > 1) {
+                        for (int i = 0; i < k - 1; ++i) {
+                                // apply old Givens rotations to v
+                                v.applyOnTheLeft(i, i + 1, G[i].adjoint());
+                        }
+                }
+
+                if (k<m && v(k) != (Scalar) 0) {
+                        // determine next Givens rotation
+                        G[k - 1].makeGivens(v(k - 1), v(k));
+
+                        // apply Givens rotation to v and w
+                        v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+                        w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+
+                }
+
+                // insert coefficients into upper matrix triangle
+                H.col(k - 1).head(k) = v.head(k);
+
+                bool stop=(k==m || abs(w(k)) < tol || iters == maxIters);
+
+                if (stop || k == restart) {
+
+                        // solve upper triangular system
+                        VectorType y = w.head(k);
+                        H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
+
+                        // use Horner-like scheme to calculate solution vector
+                        VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
+
+                        // apply Householder reflection H_{k} to x_new
+                        x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
+
+                        for (int i = k - 2; i >= 0; --i) {
+                                x_new += y(i) * VectorType::Unit(m, i);
+                                // apply Householder reflection H_{i} to x_new
+                                x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+                        }
+
+                        x += x_new;
+
+                        if (stop) {
+                                return true;
+                        } else {
+                                k=0;
+
+                                // reset data for a restart: r0 = rhs - mat * x
+                                VectorType p0=mat*x;
+                                VectorType p1=precond.solve(p0);
+                                r0 = rhs - p1;
+//                                 r0_sqnorm = r0.squaredNorm();
+                                w = VectorType::Zero(restart + 1);
+                                H = FMatrixType::Zero(m, restart + 1);
+                                tau = VectorType::Zero(restart + 1);
+
+                                // generate first Householder vector
+                                RealScalar beta;
+                                r0.makeHouseholder(e, tau.coeffRef(0), beta);
+                                w(0)=(Scalar) beta;
+                                H.bottomLeftCorner(m - 1, 1) = e;
+
+                        }
+
+                }
+
+
+
+	}
+	
+	return false;
+
+}
+
+}
+
+template< typename _MatrixType,
+          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class GMRES;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<GMRES<_MatrixType,_Preconditioner> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A GMRES solver for sparse square problems
+  *
+  * This class allows solving sparse linear problems A.x = b using a generalized minimal
+  * residual method. The vectors x and b can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+  *
+  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+  * and NumTraits<Scalar>::epsilon() for the tolerance.
+  * 
+  * This class can be used like the direct solver classes. Here is a typical usage example:
+  * \code
+  * int n = 10000;
+  * VectorXd x(n), b(n);
+  * SparseMatrix<double> A(n,n);
+  * // fill A and b
+  * GMRES<SparseMatrix<double> > solver(A);
+  * x = solver.solve(b);
+  * std::cout << "#iterations:     " << solver.iterations() << std::endl;
+  * std::cout << "estimated error: " << solver.error()      << std::endl;
+  * // update b, and solve again
+  * x = solver.solve(b);
+  * \endcode
+  * 
+  * By default the iterations start with x=0 as an initial guess of the solution.
+  * One can control the start using the solveWithGuess() method.
+  * 
+  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+  */
+template< typename _MatrixType, typename _Preconditioner>
+class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
+{
+  typedef IterativeSolverBase<GMRES> Base;
+  using Base::mp_matrix;
+  using Base::m_error;
+  using Base::m_iterations;
+  using Base::m_info;
+  using Base::m_isInitialized;
+ 
+private:
+  int m_restart;
+  
+public:
+  typedef _MatrixType MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef _Preconditioner Preconditioner;
+
+public:
+
+  /** Default constructor. */
+  GMRES() : Base(), m_restart(30) {}
+
+  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+    * 
+    * This constructor is a shortcut for the default constructor followed
+    * by a call to compute().
+    * 
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  GMRES(const MatrixType& A) : Base(A), m_restart(30) {}
+
+  ~GMRES() {}
+  
+  /** Get the number of iterations after which a restart is performed.
+    */
+  int get_restart() { return m_restart; }
+  
+  /** Set the number of iterations after which a restart is performed.
+    *  \param restart   number of iterations for a restart, default is 30.
+    */
+  void set_restart(const int restart) { m_restart=restart; }
+  
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * \a x0 as an initial solution.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs,typename Guess>
+  inline const internal::solve_retval_with_guess<GMRES, Rhs, Guess>
+  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+  {
+    eigen_assert(m_isInitialized && "GMRES is not initialized.");
+    eigen_assert(Base::rows()==b.rows()
+              && "GMRES::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::solve_retval_with_guess
+            <GMRES, Rhs, Guess>(*this, b.derived(), x0);
+  }
+  
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solveWithGuess(const Rhs& b, Dest& x) const
+  {    
+    bool failed = false;
+    for(int j=0; j<b.cols(); ++j)
+    {
+      m_iterations = Base::maxIterations();
+      m_error = Base::m_tolerance;
+      
+      typename Dest::ColXpr xj(x,j);
+      if(!internal::gmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
+        failed = true;
+    }
+    m_info = failed ? NumericalIssue
+           : m_error <= Base::m_tolerance ? Success
+           : NoConvergence;
+    m_isInitialized = true;
+  }
+
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solve(const Rhs& b, Dest& x) const
+  {
+    x = b;
+    if(x.squaredNorm() == 0) return; // Check Zero right hand side
+    _solveWithGuess(b,x);
+  }
+
+protected:
+
+};
+
+
+namespace internal {
+
+  template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<GMRES<_MatrixType, _Preconditioner>, Rhs>
+  : solve_retval_base<GMRES<_MatrixType, _Preconditioner>, Rhs>
+{
+  typedef GMRES<_MatrixType, _Preconditioner> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GMRES_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
new file mode 100644
index 0000000..661c1f2
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
@@ -0,0 +1,278 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_CHOLESKY_H
+#define EIGEN_INCOMPLETE_CHOLESKY_H
+#include "Eigen/src/IterativeLinearSolvers/IncompleteLUT.h" 
+#include <Eigen/OrderingMethods>
+#include <list>
+
+namespace Eigen {  
+/** 
+ * \brief Modified Incomplete Cholesky with dual threshold
+ * 
+ * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
+ *              Limited Memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
+ * 
+ * \tparam Scalar the scalar type of the input matrix. The matrix passed to
+ *                compute() should be symmetric; a row-oriented sparse matrix is advised.
+ * \tparam _UpLo The triangular part of the matrix to reference.
+ * \tparam _OrderingType The ordering method used to compute the fill-reducing permutation.
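+ *
+ * A minimal usage sketch (illustrative only; it applies the factorization to a
+ * vector directly, the way an iterative solver would apply it as a preconditioner):
+ * \code
+ * int n = 100;
+ * SparseMatrix<double> A(n,n);      // symmetric; lower part referenced by default
+ * VectorXd r(n), z(n);
+ * // fill A and r ...
+ * IncompleteCholesky<double> ichol(A);
+ * z = ichol.solve(r);               // z approximates A^{-1} r
+ * \endcode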
+ */
+
+template <typename Scalar, int _UpLo = Lower, typename _OrderingType = NaturalOrdering<int> >
+class IncompleteCholesky : internal::noncopyable
+{
+  public:
+    typedef SparseMatrix<Scalar,ColMajor> MatrixType;
+    typedef _OrderingType OrderingType;
+    typedef typename MatrixType::RealScalar RealScalar; 
+    typedef typename MatrixType::Index Index; 
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+    typedef Matrix<Scalar,Dynamic,1> ScalarType; 
+    typedef Matrix<Index,Dynamic, 1> IndexType;
+    typedef std::vector<std::list<Index> > VectorList; 
+    enum { UpLo = _UpLo };
+  public:
+    IncompleteCholesky() : m_shift(1),m_factorizationIsOk(false) {}
+    IncompleteCholesky(const MatrixType& matrix) : m_shift(1),m_factorizationIsOk(false)
+    {
+      compute(matrix);
+    }
+    
+    Index rows() const { return m_L.rows(); }
+    
+    Index cols() const { return m_L.cols(); }
+    
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
+      return m_info;
+    }
+    
+    /** 
+     * \brief Set the initial shift parameter
+     */
+    void setShift( Scalar shift) { m_shift = shift; }
+    
+    /**
+    * \brief Computes the fill reducing permutation vector. 
+    */
+    template<typename MatrixType>
+    void analyzePattern(const MatrixType& mat)
+    {
+      OrderingType ord; 
+      ord(mat.template selfadjointView<UpLo>(), m_perm); 
+      m_analysisIsOk = true; 
+    }
+    
+    template<typename MatrixType>
+    void factorize(const MatrixType& amat);
+    
+    template<typename MatrixType>
+    void compute (const MatrixType& matrix)
+    {
+      analyzePattern(matrix); 
+      factorize(matrix);
+    }
+    
+    template<typename Rhs, typename Dest>
+    void _solve(const Rhs& b, Dest& x) const
+    {
+      eigen_assert(m_factorizationIsOk && "factorize() should be called first");
+      if (m_perm.rows() == b.rows())
+        x = m_perm.inverse() * b; 
+      else 
+        x = b; 
+      x = m_scal.asDiagonal() * x;
+      x = m_L.template triangularView<UnitLower>().solve(x); 
+      x = m_L.adjoint().template triangularView<Upper>().solve(x); 
+      if (m_perm.rows() == b.rows())
+        x = m_perm * x;
+      x = m_scal.asDiagonal() * x;
+    }
+    template<typename Rhs> inline const internal::solve_retval<IncompleteCholesky, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_factorizationIsOk && "IncompleteLLT did not succeed");
+      eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
+      eigen_assert(cols()==b.rows()
+                && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<IncompleteCholesky, Rhs>(*this, b.derived());
+    }
+  protected:
+    SparseMatrix<Scalar,ColMajor> m_L;  // The lower part stored in CSC
+    ScalarType m_scal; // The vector for scaling the matrix 
+    Scalar m_shift; //The initial shift parameter
+    bool m_analysisIsOk; 
+    bool m_factorizationIsOk; 
+    bool m_isInitialized;
+    ComputationInfo m_info;
+    PermutationType m_perm; 
+    
+  private:
+    template <typename IdxType, typename SclType>
+    inline void updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol); 
+}; 
+
+template<typename Scalar, int _UpLo, typename OrderingType>
+template<typename _MatrixType>
+void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
+{
+  using std::sqrt;
+  using std::min;
+  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); 
+    
+  // Dropping strategy: keep only the p largest elements per column, where p is the
+  // number of elements in the column of the original matrix. Other strategies will be added.
+  
+  // Apply the fill-reducing permutation computed in analyzePattern()
+  if (m_perm.rows() == mat.rows() ) // To detect the null permutation
+    m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);
+  else
+    m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
+  
+  Index n = m_L.cols(); 
+  Index nnz = m_L.nonZeros();
+  Map<ScalarType> vals(m_L.valuePtr(), nnz); //values
+  Map<IndexType> rowIdx(m_L.innerIndexPtr(), nnz);  //Row indices
+  Map<IndexType> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each column
+  IndexType firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization
+  VectorList listCol(n); // listCol(j) is a linked list of columns to update column j
+  ScalarType curCol(n); // Stores the nonzero values of the current column
+  IndexType irow(n); // Row indices of nonzero elements in each column
+  
+  
+  // Computes the scaling factors 
+  m_scal.resize(n);
+  for (int j = 0; j < n; j++)
+  {
+    m_scal(j) = m_L.col(j).norm();
+    m_scal(j) = sqrt(m_scal(j));
+  }
+  // Scale and compute the shift for the matrix 
+  Scalar mindiag = vals[0];
+  for (int j = 0; j < n; j++){
+    for (int k = colPtr[j]; k < colPtr[j+1]; k++)
+     vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
+    mindiag = (min)(vals[colPtr[j]], mindiag);
+  }
+  
+  if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;
+  // Apply the shift to the diagonal elements of the matrix
+  for (int j = 0; j < n; j++)
+    vals[colPtr[j]] += m_shift;
+  // jki version of the Cholesky factorization 
+  for (int j=0; j < n; ++j)
+  {  
+    //Left-looking factorize the column j 
+    // First, load the jth column into curCol 
+    Scalar diag = vals[colPtr[j]];  // It is assumed that only the lower part is stored
+    curCol.setZero();
+    irow.setLinSpaced(n,0,n-1); 
+    for (int i = colPtr[j] + 1; i < colPtr[j+1]; i++)
+    {
+      curCol(rowIdx[i]) = vals[i]; 
+      irow(rowIdx[i]) = rowIdx[i]; 
+    }
+    std::list<int>::iterator k; 
+    // Browse all previous columns that will update column j
+    for(k = listCol[j].begin(); k != listCol[j].end(); k++) 
+    {
+      int jk = firstElt(*k); // First element to use in the column 
+      jk += 1; 
+      for (int i = jk; i < colPtr[*k+1]; i++)
+      {
+        curCol(rowIdx[i]) -= vals[i] * vals[jk] ;
+      }
+      updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);
+    }
+    
+    // Scale the current column
+    if(RealScalar(diag) <= 0) 
+    {
+      std::cerr << "\nNegative diagonal during Incomplete factorization... "<< j << "\n";
+      m_info = NumericalIssue; 
+      return; 
+    }
+    RealScalar rdiag = sqrt(RealScalar(diag));
+    vals[colPtr[j]] = rdiag;
+    for (int i = j+1; i < n; i++)
+    {
+      //Scale 
+      curCol(i) /= rdiag;
+      //Update the remaining diagonals with curCol
+      vals[colPtr[i]] -= curCol(i) * curCol(i);
+    }
+    // Select the largest p elements
+    //  p is the original number of elements in the column (without the diagonal)
+    int p = colPtr[j+1] - colPtr[j] - 1 ; 
+    internal::QuickSplit(curCol, irow, p); 
+    // Insert the largest p elements in the matrix
+    int cpt = 0; 
+    for (int i = colPtr[j]+1; i < colPtr[j+1]; i++)
+    {
+      vals[i] = curCol(cpt); 
+      rowIdx[i] = irow(cpt); 
+      cpt ++; 
+    }
+    // Get the first smallest row index and put it after the diagonal element
+    Index jk = colPtr(j)+1;
+    updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol); 
+  }
+  m_factorizationIsOk = true; 
+  m_isInitialized = true;
+  m_info = Success; 
+}
+
+template<typename Scalar, int _UpLo, typename OrderingType>
+template <typename IdxType, typename SclType>
+inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol)
+{
+  if (jk < colPtr(col+1) )
+  {
+    Index p = colPtr(col+1) - jk;
+    Index minpos; 
+    rowIdx.segment(jk,p).minCoeff(&minpos);
+    minpos += jk;
+    if (rowIdx(minpos) != rowIdx(jk))
+    {
+      //Swap
+      std::swap(rowIdx(jk),rowIdx(minpos));
+      std::swap(vals(jk),vals(minpos));
+    }
+    firstElt(col) = jk;
+    listCol[rowIdx(jk)].push_back(col);
+  }
+}
+namespace internal {
+
+template<typename _Scalar, int _UpLo, typename OrderingType, typename Rhs>
+struct solve_retval<IncompleteCholesky<_Scalar,  _UpLo, OrderingType>, Rhs>
+  : solve_retval_base<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
+{
+  typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen 
+
+#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
new file mode 100644
index 0000000..67e7801
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
@@ -0,0 +1,113 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_LU_H
+#define EIGEN_INCOMPLETE_LU_H
+
+namespace Eigen { 
+
+template <typename _Scalar>
+class IncompleteLU
+{
+    typedef _Scalar Scalar;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef typename Vector::Index Index;
+    typedef SparseMatrix<Scalar,RowMajor> FactorType;
+
+  public:
+    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+    IncompleteLU() : m_isInitialized(false) {}
+
+    template<typename MatrixType>
+    IncompleteLU(const MatrixType& mat) : m_isInitialized(false)
+    {
+      compute(mat);
+    }
+
+    Index rows() const { return m_lu.rows(); }
+    Index cols() const { return m_lu.cols(); }
+
+    template<typename MatrixType>
+    IncompleteLU& compute(const MatrixType& mat)
+    {
+      m_lu = mat;
+      int size = mat.cols();
+      Vector diag(size);
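+      // Incomplete LU factorization restricted to the sparsity pattern of mat (no fill-in):
+      // each row i is updated in place using the already processed rows k < i; the strictly
+      // lower part stores the unit-lower factor L, the diagonal and upper part store U.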
+      for(int i=0; i<size; ++i)
+      {
+        typename FactorType::InnerIterator k_it(m_lu,i);
+        for(; k_it && k_it.index()<i; ++k_it)
+        {
+          int k = k_it.index();
+          k_it.valueRef() /= diag(k);
+
+          typename FactorType::InnerIterator j_it(k_it);
+          typename FactorType::InnerIterator kj_it(m_lu, k);
+          while(kj_it && kj_it.index()<=k) ++kj_it;
+          for(++j_it; j_it; )
+          {
+            if(kj_it.index()==j_it.index())
+            {
+              j_it.valueRef() -= k_it.value() * kj_it.value();
+              ++j_it;
+              ++kj_it;
+            }
+            else if(kj_it.index()<j_it.index()) ++kj_it;
+            else                                ++j_it;
+          }
+        }
+        if(k_it && k_it.index()==i) diag(i) = k_it.value();
+        else                        diag(i) = 1;
+      }
+      m_isInitialized = true;
+      return *this;
+    }
+
+    template<typename Rhs, typename Dest>
+    void _solve(const Rhs& b, Dest& x) const
+    {
+      x = m_lu.template triangularView<UnitLower>().solve(b);
+      x = m_lu.template triangularView<Upper>().solve(x);
+    }
+
+    template<typename Rhs> inline const internal::solve_retval<IncompleteLU, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "IncompleteLU is not initialized.");
+      eigen_assert(cols()==b.rows()
+                && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<IncompleteLU, Rhs>(*this, b.derived());
+    }
+
+  protected:
+    FactorType m_lu;
+    bool m_isInitialized;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<IncompleteLU<_MatrixType>, Rhs>
+  : solve_retval_base<IncompleteLU<_MatrixType>, Rhs>
+{
+  typedef IncompleteLU<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INCOMPLETE_LU_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
new file mode 100644
index 0000000..c9c1a4b
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/* NOTE The class IterationController has been adapted from the iteration
+ *      class of the GMM++ and ITL libraries.
+ */
+
+//=======================================================================
+// Copyright (C) 1997-2001
+// Authors: Andrew Lumsdaine <lums@osl.iu.edu> 
+//          Lie-Quan Lee     <llee@osl.iu.edu>
+//
+// This file is part of the Iterative Template Library
+//
+// You should have received a copy of the License Agreement for the
+// Iterative Template Library along with the software;  see the
+// file LICENSE.  
+//
+// Permission to modify the code and to distribute modified code is
+// granted, provided the text of this NOTICE is retained, a notice that
+// the code was modified is included with the above COPYRIGHT NOTICE and
+// with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE
+// file is distributed with the modified code.
+//
+// LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED.
+// By way of example, but not limitation, Licensor MAKES NO
+// REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY
+// PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS
+// OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS
+// OR OTHER RIGHTS.
+//=======================================================================
+
+//========================================================================
+//
+// Copyright (C) 2002-2007 Yves Renard
+//
+// This file is a part of GETFEM++
+//
+// Getfem++ is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation; version 2.1 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Lesser General Public License for more details.
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301,
+// USA.
+//
+//========================================================================
+
+#include "../../../../Eigen/src/Core/util/NonMPL2.h"
+
+#ifndef EIGEN_ITERATION_CONTROLLER_H
+#define EIGEN_ITERATION_CONTROLLER_H
+
+namespace Eigen { 
+
+/** \ingroup IterativeSolvers_Module
+  * \class IterationController
+  *
+  * \brief Controls the iterations of the iterative solvers
+  *
+  * This class has been adapted from the iteration class of GMM++ and ITL libraries.
+  *
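+  * A minimal usage sketch (illustrative; the surrounding solver loop is an
+  * assumption, mirroring how constrained_cg drives this class):
+  * \code
+  * IterationController iter(1e-8, 1, 200);  // tolerance, verbosity, max iterations
+  * iter.setRhsNorm(b.norm());
+  * while (!iter.finished(r.norm()))
+  * {
+  *   // ... update the iterate x and the residual r ...
+  *   ++iter;
+  * }
+  * bool ok = iter.converged();
+  * \endcode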
+  */
+class IterationController
+{
+  protected :
+    double m_rhsn;        ///< Right hand side norm
+    size_t m_maxiter;     ///< Max. number of iterations
+    int m_noise;          ///< if noise > 0 iterations are printed
+    double m_resmax;      ///< maximum residual
+    double m_resminreach, m_resadd;
+    size_t m_nit;         ///< iteration number
+    double m_res;         ///< last computed residual
+    bool m_written;
+    void (*m_callback)(const IterationController&);
+  public :
+
+    void init()
+    {
+      m_nit = 0; m_res = 0.0; m_written = false;
+      m_resminreach = 1E50; m_resadd = 0.0;
+      m_callback = 0;
+    }
+
+    IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1))
+      : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); }
+
+    void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; }
+    void operator ++() { (*this)++; }
+
+    bool first() { return m_nit == 0; }
+
+    /* get/set the "noisiness" (verbosity) of the solvers */
+    int noiseLevel() const { return m_noise; }
+    void setNoiseLevel(int n) { m_noise = n; }
+    void reduceNoiseLevel() { if (m_noise > 0) m_noise--; }
+
+    double maxResidual() const { return m_resmax; }
+    void setMaxResidual(double r) { m_resmax = r; }
+
+    double residual() const { return m_res; }
+
+    /* change the user-definable callback, called after each iteration */
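+    // A callback is any free function taking the controller by const reference,
+    // e.g. (a hypothetical sketch, 'report' is not part of Eigen):
+    //   void report(const IterationController &ic)
+    //   { std::cout << ic.iteration() << " " << ic.residual() << "\n"; }
+    //   ...
+    //   iter.setCallback(&report);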
+    void setCallback(void (*t)(const IterationController&))
+    {
+      m_callback = t;
+    }
+
+    size_t iteration() const { return m_nit; }
+    void setIteration(size_t i) { m_nit = i; }
+
+    size_t maxIterations() const { return m_maxiter; }
+    void setMaxIterations(size_t i) { m_maxiter = i; }
+
+    double rhsNorm() const { return m_rhsn; }
+    void setRhsNorm(double r) { m_rhsn = r; }
+
+    bool converged() const { return m_res <= m_rhsn * m_resmax; }
+    bool converged(double nr)
+    {
+      using std::abs;
+      m_res = abs(nr); 
+      m_resminreach = (std::min)(m_resminreach, m_res);
+      return converged();
+    }
+    template<typename VectorType> bool converged(const VectorType &v)
+    { return converged(v.squaredNorm()); }
+
+    bool finished(double nr)
+    {
+      if (m_callback) m_callback(*this);
+      if (m_noise > 0 && !m_written)
+      {
+        converged(nr);
+        m_written = true;
+      }
+      return (m_nit >= m_maxiter || converged(nr));
+    }
+    template <typename VectorType>
+    bool finished(const MatrixBase<VectorType> &v)
+    { return finished(double(v.squaredNorm())); }
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_ITERATION_CONTROLLER_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
new file mode 100644
index 0000000..30f26aa
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -0,0 +1,310 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_MINRES_H_
+#define EIGEN_MINRES_H_
+
+
+namespace Eigen {
+    
+    namespace internal {
+        
+        /** \internal Low-level MINRES algorithm
+         * \param mat The matrix A
+         * \param rhs The right hand side vector b
+         * \param x On input an initial solution, on output the computed solution.
+         * \param precond A right preconditioner being able to efficiently solve for an
+         *                approximation of Ax=b (regardless of b)
+         * \param iters On input the maximum number of iterations, on output the number of performed iterations.
+         * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+         */
+        template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+        EIGEN_DONT_INLINE
+        void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
+                    const Preconditioner& precond, int& iters,
+                    typename Dest::RealScalar& tol_error)
+        {
+            using std::sqrt;
+            typedef typename Dest::RealScalar RealScalar;
+            typedef typename Dest::Scalar Scalar;
+            typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+            // Check for zero rhs
+            const RealScalar rhsNorm2(rhs.squaredNorm());
+            if(rhsNorm2 == 0)
+            {
+                x.setZero();
+                iters = 0;
+                tol_error = 0;
+                return;
+            }
+            
+            // initialize
+            const int maxIters(iters);  // initialize maxIters to iters
+            const int N(mat.cols());    // the size of the matrix
+            const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
+            
+            // Initialize preconditioned Lanczos
+            VectorType v_old(N); // will be initialized inside loop
+            VectorType v( VectorType::Zero(N) ); //initialize v
+            VectorType v_new(rhs-mat*x); //initialize v_new
+            RealScalar residualNorm2(v_new.squaredNorm());
+            VectorType w(N); // will be initialized inside loop
+            VectorType w_new(precond.solve(v_new)); // initialize w_new
+//            RealScalar beta; // will be initialized inside loop
+            RealScalar beta_new2(v_new.dot(w_new));
+            eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+            RealScalar beta_new(sqrt(beta_new2));
+            const RealScalar beta_one(beta_new);
+            v_new /= beta_new;
+            w_new /= beta_new;
+            // Initialize other variables
+            RealScalar c(1.0); // the cosine of the Givens rotation
+            RealScalar c_old(1.0);
+            RealScalar s(0.0); // the sine of the Givens rotation
+            RealScalar s_old(0.0); // the sine of the Givens rotation
+            VectorType p_oold(N); // will be initialized in loop
+            VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
+            VectorType p(p_old); // initialize p=0
+            RealScalar eta(1.0);
+                        
+            iters = 0; // reset iters
+            while ( iters < maxIters )
+            {
+                // Preconditioned Lanczos
+                /* Note that there are 4 variants on the Lanczos algorithm. These are
+                 * described in Paige, C. C. (1972). Computational variants of
+                 * the Lanczos method for the eigenproblem. IMA Journal of Applied
+                 * Mathematics, 10(3), 373–381. The current implementation corresponds 
+                 * to the case A(2,7) in the paper. It also corresponds to 
+                 * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
+                 * Systems, 2003 p.173. For the preconditioned version see 
+                 * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1997).
+                 */
+                const RealScalar beta(beta_new);
+                v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
+//                const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
+                v = v_new; // update
+                w = w_new; // update
+//                const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
+                v_new.noalias() = mat*w - beta*v_old; // compute v_new
+                const RealScalar alpha = v_new.dot(w);
+                v_new -= alpha*v; // overwrite v_new
+                w_new = precond.solve(v_new); // overwrite w_new
+                beta_new2 = v_new.dot(w_new); // compute beta_new
+                eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+                beta_new = sqrt(beta_new2); // compute beta_new
+                v_new /= beta_new; // overwrite v_new for next iteration
+                w_new /= beta_new; // overwrite w_new for next iteration
+                
+                // Givens rotation
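+                /* The new column [beta, alpha, beta_new]^T of the Lanczos tridiagonal
+                 * matrix is first hit by the two previous rotations (yielding r3, r2
+                 * and r1_hat); a new rotation (c,s) is then chosen to annihilate
+                 * beta_new, turning r1_hat into the diagonal entry
+                 * r1 = sqrt(r1_hat^2 + beta_new^2) of the R factor. */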
+                const RealScalar r2 = s*alpha + c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
+                const RealScalar r3 = s_old*beta;             // s, s_old, c and c_old are still from previous iteration
+                const RealScalar r1_hat = c*alpha - c_old*s*beta;
+                const RealScalar r1 = sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) );
+                c_old = c; // store for next iteration
+                s_old = s; // store for next iteration
+                c = r1_hat/r1; // new cosine
+                s = beta_new/r1; // new sine
+                
+                // Update solution
+                p_oold = p_old;
+//                const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
+                p_old = p;
+                p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED?
+                x += beta_one*c*eta*p;
+                
+                /* Update the squared residual. Note that this is the estimated residual.
+                The real residual |Ax-b|^2 may be slightly larger */
+                residualNorm2 *= s*s;
+                
+                if ( residualNorm2 < threshold2)
+                {
+                    break;
+                }
+                
+                eta=-s*eta; // update eta
+                iters++; // increment iteration number (for output purposes)
+            }
+            
+            /* Compute error. Note that this is the estimated error. The real 
+             error |Ax-b|/|b| may be slightly larger */
+            tol_error = std::sqrt(residualNorm2 / rhsNorm2);
+        }
+        
+    }
+    
+    template< typename _MatrixType, int _UpLo=Lower,
+    typename _Preconditioner = IdentityPreconditioner>
+//    typename _Preconditioner = IdentityPreconditioner<typename _MatrixType::Scalar> > // preconditioner must be positive definite
+    class MINRES;
+    
+    namespace internal {
+        
+        template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+        struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
+        {
+            typedef _MatrixType MatrixType;
+            typedef _Preconditioner Preconditioner;
+        };
+        
+    }
+    
+    /** \ingroup IterativeLinearSolvers_Module
+     * \brief A minimal residual solver for sparse symmetric problems
+     *
+     * This class allows solving A.x = b linear problems using the MINRES algorithm
+     * of Paige and Saunders (1975). The matrix A must be symmetric (possibly indefinite).
+     * The vectors x and b can be either dense or sparse.
+     *
+     * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+     * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+     *               or Upper. Default is Lower.
+     * \tparam _Preconditioner the type of the preconditioner. Default is IdentityPreconditioner.
+     *
+     * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+     * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+     * and NumTraits<Scalar>::epsilon() for the tolerance.
+     *
+     * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+     * \code
+     * int n = 10000;
+     * VectorXd x(n), b(n);
+     * SparseMatrix<double> A(n,n);
+     * // fill A and b
+     * MINRES<SparseMatrix<double> > mr;
+     * mr.compute(A);
+     * x = mr.solve(b);
+     * std::cout << "#iterations:     " << mr.iterations() << std::endl;
+     * std::cout << "estimated error: " << mr.error()      << std::endl;
+     * // update b, and solve again
+     * x = mr.solve(b);
+     * \endcode
+     *
+     * By default the iterations start with x=0 as an initial guess of the solution.
+     * One can control the start using the solveWithGuess() method.
+     *
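+     * For instance (a small sketch, where \c x0 is assumed to hold an initial guess):
+     * \code
+     * x = mr.solveWithGuess(b, x0);
+     * \endcode
+     *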
+     * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+     */
+    template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+    class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
+    {
+        
+        typedef IterativeSolverBase<MINRES> Base;
+        using Base::mp_matrix;
+        using Base::m_error;
+        using Base::m_iterations;
+        using Base::m_info;
+        using Base::m_isInitialized;
+    public:
+        typedef _MatrixType MatrixType;
+        typedef typename MatrixType::Scalar Scalar;
+        typedef typename MatrixType::Index Index;
+        typedef typename MatrixType::RealScalar RealScalar;
+        typedef _Preconditioner Preconditioner;
+        
+        enum {UpLo = _UpLo};
+        
+    public:
+        
+        /** Default constructor. */
+        MINRES() : Base() {}
+        
+        /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+         *
+         * This constructor is a shortcut for the default constructor followed
+         * by a call to compute().
+         *
+         * \warning this class stores a reference to the matrix A as well as some
+         * precomputed values that depend on it. Therefore, if \a A is changed
+         * this class becomes invalid. Call compute() to update it with the new
+         * matrix A, or modify a copy of A.
+         */
+        MINRES(const MatrixType& A) : Base(A) {}
+        
+        /** Destructor. */
+        ~MINRES(){}
+		
+        /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+         * \a x0 as an initial solution.
+         *
+         * \sa compute()
+         */
+        template<typename Rhs,typename Guess>
+        inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
+        solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+        {
+            eigen_assert(m_isInitialized && "MINRES is not initialized.");
+            eigen_assert(Base::rows()==b.rows()
+                         && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
+            return internal::solve_retval_with_guess
+            <MINRES, Rhs, Guess>(*this, b.derived(), x0);
+        }
+        
+        /** \internal */
+        template<typename Rhs,typename Dest>
+        void _solveWithGuess(const Rhs& b, Dest& x) const
+        {
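+            // MatrixWrapperType below passes the matrix as is when UpLo is Lower|Upper,
+            // and wraps the selected triangular part in a SparseSelfAdjointView otherwise.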
+            typedef typename internal::conditional<UpLo==(Lower|Upper),
+                                                   const MatrixType&,
+                                                   SparseSelfAdjointView<const MatrixType, UpLo>
+                                                  >::type MatrixWrapperType;
+                                          
+            m_iterations = Base::maxIterations();
+            m_error = Base::m_tolerance;
+            
+            for(int j=0; j<b.cols(); ++j)
+            {
+                m_iterations = Base::maxIterations();
+                m_error = Base::m_tolerance;
+                
+                typename Dest::ColXpr xj(x,j);
+                internal::minres(MatrixWrapperType(*mp_matrix), b.col(j), xj,
+                                 Base::m_preconditioner, m_iterations, m_error);
+            }
+            
+            m_isInitialized = true;
+            m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+        }
+        
+        /** \internal */
+        template<typename Rhs,typename Dest>
+        void _solve(const Rhs& b, Dest& x) const
+        {
+            x.setZero();
+            _solveWithGuess(b,x);
+        }
+        
+    protected:
+        
+    };
+    
+    namespace internal {
+        
+        template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
+        struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+        : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+        {
+            typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
+            EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+            
+            template<typename Dest> void evalTo(Dest& dst) const
+            {
+                dec()._solve(rhs(),dst);
+            }
+        };
+        
+    } // end namespace internal
+    
+} // end namespace Eigen
+
+#endif // EIGEN_MINRES_H_
+
diff --git a/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
new file mode 100644
index 0000000..4fd4392
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
@@ -0,0 +1,185 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ITERSCALING_H
+#define EIGEN_ITERSCALING_H
+/**
+  * \ingroup IterativeSolvers_Module
+  * \brief Iterative scaling algorithm to equilibrate the row and column norms of a matrix
+  * 
+  * This class can be used as a preprocessing tool to accelerate the convergence of iterative methods.
+  * 
+  * It is also useful to limit the amount of pivoting during LU/ILU factorization.
+  * The scaling strategy presented here preserves the symmetry of the problem.
+  * NOTE It is assumed that the matrix does not have any empty row or column.
+  * 
+  * Example with key steps 
+  * \code
+  * VectorXd x(n), b(n);
+  * SparseMatrix<double> A;
+  * // fill A and b;
+  * IterScaling<SparseMatrix<double> > scal; 
+  * // Compute the left and right scaling vectors. The matrix is equilibrated at output
+  * scal.computeRef(A); 
+  * // Scale the right hand side
+  * b = scal.LeftScaling().cwiseProduct(b); 
+  * // Now, solve the equilibrated linear system with any available solver
+  * 
+  * // Scale back the computed solution
+  * x = scal.RightScaling().cwiseProduct(x); 
+  * \endcode
+  * 
+  * \tparam _MatrixType the type of the matrix. It should be a real square sparse matrix.
+  * 
+  * References: D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research Report RR-7552
+  * 
+  * \sa \ref IncompleteLUT 
+  */
+namespace Eigen {
+using std::abs; 
+template<typename _MatrixType>
+class IterScaling
+{
+  public:
+    typedef _MatrixType MatrixType; 
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    
+  public:
+    IterScaling() { init(); }
+    
+    IterScaling(const MatrixType& matrix)
+    {
+      init();
+      compute(matrix);
+    }
+    
+    ~IterScaling() { }
+    
+    /** 
+     * Compute the left and right diagonal matrices to scale the input matrix @p mat
+     * 
+     * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal. 
+     * 
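+     * Each pass divides every nonzero coefficient a_ij by
+     * sqrt(max_k |a_ik|) * sqrt(max_k |a_kj|), accumulating the inverse factors in
+     * the left and right scaling vectors, until the infinity norm of every row and
+     * column is within the prescribed tolerance of 1 (see setTolerance()), or the
+     * maximum number of passes is reached.
+     * 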
+     * \sa LeftScaling() RightScaling()
+     */
+    void compute (const MatrixType& mat)
+    {
+      int m = mat.rows(); 
+      int n = mat.cols();
+      eigen_assert((m>0 && m == n) && "Please give a non-empty square matrix");
+      m_left.resize(m); 
+      m_right.resize(n);
+      m_left.setOnes();
+      m_right.setOnes();
+      m_matrix = mat;
+      VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors
+      Dr.resize(m); Dc.resize(n);
+      DrRes.resize(m); DcRes.resize(n);
+      double EpsRow = 1.0, EpsCol = 1.0;
+      int its = 0; 
+      do
+      { // Iterate until the infinite norm of each row and column is approximately 1
+        // Get the maximum value in each row and column
+        Dr.setZero(); Dc.setZero();
+        for (int k=0; k<m_matrix.outerSize(); ++k)
+        {
+          for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
+          {
+            if ( Dr(it.row()) < abs(it.value()) )
+              Dr(it.row()) = abs(it.value());
+            
+            if ( Dc(it.col()) < abs(it.value()) )
+              Dc(it.col()) = abs(it.value());
+          }
+        }
+        for (int i = 0; i < m; ++i) 
+        {
+          Dr(i) = std::sqrt(Dr(i));
+          Dc(i) = std::sqrt(Dc(i));
+        }
+        // Save the scaling factors 
+        for (int i = 0; i < m; ++i) 
+        {
+          m_left(i) /= Dr(i);
+          m_right(i) /= Dc(i);
+        }
+        // Scale the rows and the columns of the matrix
+        DrRes.setZero(); DcRes.setZero(); 
+        for (int k=0; k<m_matrix.outerSize(); ++k)
+        {
+          for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
+          {
+            it.valueRef() = it.value()/( Dr(it.row()) * Dc(it.col()) );
+            // Accumulate the norms of the row and column vectors   
+            if ( DrRes(it.row()) < abs(it.value()) )
+              DrRes(it.row()) = abs(it.value());
+            
+            if ( DcRes(it.col()) < abs(it.value()) )
+              DcRes(it.col()) = abs(it.value());
+          }
+        }  
+        DrRes.array() = (1-DrRes.array()).abs();
+        EpsRow = DrRes.maxCoeff();
+        DcRes.array() = (1-DcRes.array()).abs();
+        EpsCol = DcRes.maxCoeff();
+        its++;
+      }while ( (EpsRow >m_tol || EpsCol > m_tol) && (its < m_maxits) );
+      m_isInitialized = true;
+    }
+    /** Compute the left and right scaling vectors; on output, the input matrix
+     * \a mat is scaled with the computed vectors.
+     * 
+     * \sa compute()
+     */
+    void computeRef (MatrixType& mat)
+    {
+      compute (mat);
+      mat = m_matrix;
+    }
+    /** Get the vector to scale the rows of the matrix 
+     */
+    VectorXd& LeftScaling()
+    {
+      return m_left;
+    }
+    
+    /** Get the vector to scale the columns of the matrix 
+     */
+    VectorXd& RightScaling()
+    {
+      return m_right;
+    }
+    
+    /** Set the tolerance for the convergence of the iterative scaling algorithm
+     */
+    void setTolerance(double tol)
+    {
+      m_tol = tol; 
+    }
+      
+  protected:
+    
+    void init()
+    {
+      m_tol = 1e-10;
+      m_maxits = 5;
+      m_isInitialized = false;
+    }
+    
+    MatrixType m_matrix;
+    mutable ComputationInfo m_info; 
+    bool m_isInitialized; 
+    VectorXd m_left; // Left scaling vector
+    VectorXd m_right; // Right scaling vector
+    double m_tol; 
+    int m_maxits; // Maximum number of iterations allowed
+};
+} // end namespace Eigen
+#endif // EIGEN_ITERSCALING_H